+ source /mnt/weka/home/hao.zhang/conda/miniconda/bin/activate
++ _CONDA_ROOT=/mnt/weka/home/hao.zhang/conda/miniconda
++ . /mnt/weka/home/hao.zhang/conda/miniconda/etc/profile.d/conda.sh
+++ export CONDA_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/conda
+++ CONDA_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/conda
+++ export _CE_M=
+++ _CE_M=
+++ export _CE_CONDA=
+++ _CE_CONDA=
+++ export CONDA_PYTHON_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/python
+++ CONDA_PYTHON_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/python
+++ '[' -z x ']'
++ conda activate
++ local cmd=activate
++ case "$cmd" in
++ __conda_activate activate
++ '[' -n '' ']'
++ local ask_conda
+++ PS1=
+++ __conda_exe shell.posix activate
+++ '[' -n '' ']'
+++ /mnt/weka/home/hao.zhang/conda/miniconda/bin/conda shell.posix activate
++ ask_conda='unset _CE_M unset _CE_CONDA PS1='\''(base) '\'' export PATH='\''/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/conda/miniconda/bin:/mnt/weka/home/hao.zhang/conda/miniconda/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin'\'' export CONDA_SHLVL='\''1'\'' export CONDA_PROMPT_MODIFIER='\''(base) '\'' export CONDA_EXE='\''/mnt/weka/home/hao.zhang/conda/miniconda/bin/conda'\'' export CONDA_PYTHON_EXE='\''/mnt/weka/home/hao.zhang/conda/miniconda/bin/python'\'''
++ eval 'unset _CE_M unset _CE_CONDA PS1='\''(base) '\'' export PATH='\''/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/conda/miniconda/bin:/mnt/weka/home/hao.zhang/conda/miniconda/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin'\'' export CONDA_SHLVL='\''1'\'' export CONDA_PROMPT_MODIFIER='\''(base) '\'' export CONDA_EXE='\''/mnt/weka/home/hao.zhang/conda/miniconda/bin/conda'\'' export CONDA_PYTHON_EXE='\''/mnt/weka/home/hao.zhang/conda/miniconda/bin/python'\'''
+++ unset _CE_M
+++ unset _CE_CONDA
+++ PS1='(base) '
+++ export PATH=/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/conda/miniconda/bin:/mnt/weka/home/hao.zhang/conda/miniconda/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin
+++ PATH=/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/conda/miniconda/bin:/mnt/weka/home/hao.zhang/conda/miniconda/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin
+++ export CONDA_SHLVL=1
+++ CONDA_SHLVL=1
+++ export 'CONDA_PROMPT_MODIFIER=(base) '
+++ CONDA_PROMPT_MODIFIER='(base) '
+++ export CONDA_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/conda
+++ CONDA_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/conda
+++ export CONDA_PYTHON_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/python
+++ CONDA_PYTHON_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/python
++ __conda_hashr
++ '[' -n '' ']'
++ '[' -n '' ']'
++ hash -r
+ conda activate junda-attnserver
+ local cmd=activate
+ case "$cmd" in
+ __conda_activate activate junda-attnserver
+ '[' -n '' ']'
+ local ask_conda
++ PS1='(base) '
++ __conda_exe shell.posix activate junda-attnserver
++ '[' -n '' ']'
++ /mnt/weka/home/hao.zhang/conda/miniconda/bin/conda shell.posix activate junda-attnserver
+ ask_conda='unset _CE_M unset _CE_CONDA PS1='\''(junda-attnserver) '\'' export PATH='\''/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin:/mnt/weka/home/hao.zhang/conda/miniconda/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin'\'' export CONDA_PREFIX='\''/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver'\'' export CONDA_SHLVL='\''2'\'' export CONDA_DEFAULT_ENV='\''junda-attnserver'\'' export CONDA_PROMPT_MODIFIER='\''(junda-attnserver) '\'' export CONDA_PREFIX_1='\''/mnt/weka/home/hao.zhang/conda/miniconda'\'' export CONDA_EXE='\''/mnt/weka/home/hao.zhang/conda/miniconda/bin/conda'\'' export CONDA_PYTHON_EXE='\''/mnt/weka/home/hao.zhang/conda/miniconda/bin/python'\'''
+ eval 'unset _CE_M unset _CE_CONDA PS1='\''(junda-attnserver) '\'' export PATH='\''/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin:/mnt/weka/home/hao.zhang/conda/miniconda/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin'\'' export CONDA_PREFIX='\''/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver'\'' export CONDA_SHLVL='\''2'\'' export CONDA_DEFAULT_ENV='\''junda-attnserver'\'' export CONDA_PROMPT_MODIFIER='\''(junda-attnserver) '\'' export CONDA_PREFIX_1='\''/mnt/weka/home/hao.zhang/conda/miniconda'\'' export CONDA_EXE='\''/mnt/weka/home/hao.zhang/conda/miniconda/bin/conda'\'' export CONDA_PYTHON_EXE='\''/mnt/weka/home/hao.zhang/conda/miniconda/bin/python'\'''
++ unset _CE_M
++ unset _CE_CONDA
++ PS1='(junda-attnserver) '
++ export PATH=/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin:/mnt/weka/home/hao.zhang/conda/miniconda/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin
++ PATH=/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin:/mnt/weka/home/hao.zhang/conda/miniconda/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin
++ export CONDA_PREFIX=/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver
++ CONDA_PREFIX=/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver
++ export CONDA_SHLVL=2
++ CONDA_SHLVL=2
++ export CONDA_DEFAULT_ENV=junda-attnserver
++ CONDA_DEFAULT_ENV=junda-attnserver
++ export 'CONDA_PROMPT_MODIFIER=(junda-attnserver) '
++ CONDA_PROMPT_MODIFIER='(junda-attnserver) '
++ export CONDA_PREFIX_1=/mnt/weka/home/hao.zhang/conda/miniconda
++ CONDA_PREFIX_1=/mnt/weka/home/hao.zhang/conda/miniconda
++ export CONDA_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/conda
++ CONDA_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/conda
++ export CONDA_PYTHON_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/python
++ CONDA_PYTHON_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/python
+ __conda_hashr
+ '[' -n '' ']'
+ '[' -n '' ']'
+ hash -r
+ export CHROME_TRACE_PREFIX=/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5
+ CHROME_TRACE_PREFIX=/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5
+ mkdir -p /mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5
+ export PROF_TP_SIZE=8
+ PROF_TP_SIZE=8
+ export PROF_CP_SIZE=4
+ PROF_CP_SIZE=4
+ export PROF_BS=32
+ PROF_BS=32
+ for ctx_length in 1024 2048 4096 8192 12288 16384 24576 32768 40960 49152 65536 81920 98304 131072
+ export PROF_CTX_LENGTH=1024
+ PROF_CTX_LENGTH=1024
+ name='/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L1024*tp8.cp4.bs32.json'
+ '[' -f '/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L1024*tp8.cp4.bs32.json' ']'
+ echo 'Running ctx_length=1024, TP_SIZE=8, CP_SIZE=4, BATCH_SIZE=32'
+ srun bash ./attnserver.sh
+ which python3
+ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 3 --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 1024 --max-position-embeddings 1024 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
+ which python3
+ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 2 --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 1024 --max-position-embeddings 1024 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
+ which python3
+ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 0 --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 1024 --max-position-embeddings 1024 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
+ which python3
+ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 1 --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 1024 --max-position-embeddings 1024 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated and will be removed in future. Use torchrun. Note that --use-env is set by default in torchrun. If your script expects `--local-rank` argument to be set, please change it to read from `os.environ['LOCAL_RANK']` instead. See https://pytorch.org/docs/stable/distributed.html#launch-utility for further instructions
  main()
W0621 21:08:49.911000 121562 site-packages/torch/distributed/run.py:766]
W0621 21:08:49.911000 121562 site-packages/torch/distributed/run.py:766] *****************************************
W0621 21:08:49.911000 121562 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
W0621 21:08:49.911000 121562 site-packages/torch/distributed/run.py:766] *****************************************
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated and will be removed in future. Use torchrun. Note that --use-env is set by default in torchrun. If your script expects `--local-rank` argument to be set, please change it to read from `os.environ['LOCAL_RANK']` instead. See https://pytorch.org/docs/stable/distributed.html#launch-utility for further instructions
  main()
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated and will be removed in future. Use torchrun. Note that --use-env is set by default in torchrun. If your script expects `--local-rank` argument to be set, please change it to read from `os.environ['LOCAL_RANK']` instead. See https://pytorch.org/docs/stable/distributed.html#launch-utility for further instructions
  main()
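The FutureWarning above concerns the launcher, not the training code: torch.distributed.launch injects a --local-rank argument into each worker, while torchrun only sets the LOCAL_RANK environment variable. Below is a minimal sketch of the migration the warning asks for, assuming the training script currently accepts --local-rank; the flag handling shown here is illustrative and is not taken from pretrain_gpt_profile.py.

```python
# Sketch of the change suggested by the FutureWarning, assuming the script
# currently accepts a --local-rank argument from torch.distributed.launch.
import os
import argparse

parser = argparse.ArgumentParser()
# Old style: torch.distributed.launch passes --local-rank to each process.
parser.add_argument("--local-rank", "--local_rank", type=int, default=None)
args, _ = parser.parse_known_args()

# New style: torchrun (and launch with --use-env) exports LOCAL_RANK instead.
local_rank = int(os.environ.get("LOCAL_RANK", args.local_rank or 0))

# Equivalent torchrun invocation for the commands above (one node shown):
#   torchrun --nproc_per_node 8 --nnodes 4 --node_rank 0 \
#     --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 \
#     ./pretrain_gpt_profile.py <same training arguments>
```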
W0621 21:08:49.997000 2041342 site-packages/torch/distributed/run.py:766]
W0621 21:08:49.997000 2041342 site-packages/torch/distributed/run.py:766] *****************************************
W0621 21:08:49.997000 2041342 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
W0621 21:08:49.997000 2041342 site-packages/torch/distributed/run.py:766] *****************************************
W0621 21:08:49.997000 3410371 site-packages/torch/distributed/run.py:766]
W0621 21:08:49.997000 3410371 site-packages/torch/distributed/run.py:766] *****************************************
W0621 21:08:49.997000 3410371 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
W0621 21:08:49.997000 3410371 site-packages/torch/distributed/run.py:766] *****************************************
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated and will be removed in future. Use torchrun. Note that --use-env is set by default in torchrun. If your script expects `--local-rank` argument to be set, please change it to read from `os.environ['LOCAL_RANK']` instead. See https://pytorch.org/docs/stable/distributed.html#launch-utility for further instructions
  main()
W0621 21:08:50.019000 3341756 site-packages/torch/distributed/run.py:766]
W0621 21:08:50.019000 3341756 site-packages/torch/distributed/run.py:766] *****************************************
W0621 21:08:50.019000 3341756 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
W0621 21:08:50.019000 3341756 site-packages/torch/distributed/run.py:766] *****************************************
[rank11]:[W621 21:09:12.637753438 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 11] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
[rank3]:[W621 21:09:12.752267112 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 3] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
[rank19]:[W621 21:09:12.091938040 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 19] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
[rank27]:[W621 21:09:12.166070513 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 27] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
[rank16]:[W621 21:09:12.215759321 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 16] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
[rank8]:[W621 21:09:12.768283785 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 8] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
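Every rank emits the same ProcessGroupNCCL warning because the process group is created before the process is bound to a CUDA device. Below is a minimal sketch of the fix the warning points at, assuming LOCAL_RANK is set by the launcher and that this PyTorch build accepts device_id in init_process_group (the warning text implies it does); Megatron's own initialization path is not shown in this log.

```python
# Sketch of the device binding the NCCL warning asks for; assumes LOCAL_RANK
# is exported by the launcher, as in the commands above.
import os
import torch
import torch.distributed as dist

local_rank = int(os.environ["LOCAL_RANK"])
device = torch.device("cuda", local_rank)
torch.cuda.set_device(device)

# Passing device_id pins this rank to its GPU and silences the
# "using GPU N ... currently unknown" warning.
dist.init_process_group(backend="nccl", device_id=device)
```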
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
  warnings.warn(
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
  warnings.warn(
[rank2]:[W621 21:09:53.798336084 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
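The shutdown warning above is harmless for these short profiling runs, but the cleanup it asks for is small. A minimal sketch, assuming the training entry point owns the default process group; this is not the structure of pretrain_gpt_profile.py itself.

```python
# Sketch of the explicit shutdown the warning asks for.
import torch.distributed as dist

def main():
    # ... training / profiling loop ...
    pass

if __name__ == "__main__":
    try:
        main()
    finally:
        # Tear the NCCL process group down before exit to avoid the
        # "destroy_process_group() was not called" warning.
        if dist.is_available() and dist.is_initialized():
            dist.destroy_process_group()
```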
+ set +x
+ set +x
+ set +x
+ set +x
+ for ctx_length in 1024 2048 4096 8192 12288 16384 24576 32768 40960 49152 65536 81920 98304 131072
+ export PROF_CTX_LENGTH=2048
+ PROF_CTX_LENGTH=2048
+ name='/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L2048*tp8.cp4.bs32.json'
+ '[' -f '/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L2048*tp8.cp4.bs32.json' ']'
+ echo 'Running ctx_length=2048, TP_SIZE=8, CP_SIZE=4, BATCH_SIZE=32'
+ srun bash ./attnserver.sh
rm: cannot remove 'gpt-checkpoint/': Is a directory
+ which python3
+ which python3
+ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 0 --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 2048 --max-position-embeddings 2048 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
+ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 3 --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 2048 --max-position-embeddings 2048 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
+ which python3
+ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 1 --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 2048 --max-position-embeddings 2048 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
+ which python3
+ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 2 --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 2048 --max-position-embeddings 2048 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated and will be removed in future. Use torchrun. Note that --use-env is set by default in torchrun. If your script expects `--local-rank` argument to be set, please change it to read from `os.environ['LOCAL_RANK']` instead. See https://pytorch.org/docs/stable/distributed.html#launch-utility for further instructions
  main()
W0621 21:10:03.231000 125170 site-packages/torch/distributed/run.py:766]
W0621 21:10:03.231000 125170 site-packages/torch/distributed/run.py:766] *****************************************
W0621 21:10:03.231000 125170 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
W0621 21:10:03.231000 125170 site-packages/torch/distributed/run.py:766] *****************************************
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated and will be removed in future. Use torchrun. Note that --use-env is set by default in torchrun. If your script expects `--local-rank` argument to be set, please change it to read from `os.environ['LOCAL_RANK']` instead. See https://pytorch.org/docs/stable/distributed.html#launch-utility for further instructions
  main()
W0621 21:10:03.355000 2044864 site-packages/torch/distributed/run.py:766]
W0621 21:10:03.355000 2044864 site-packages/torch/distributed/run.py:766] *****************************************
W0621 21:10:03.355000 2044864 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
W0621 21:10:03.355000 2044864 site-packages/torch/distributed/run.py:766] *****************************************
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated and will be removed in future. Use torchrun. Note that --use-env is set by default in torchrun. If your script expects `--local-rank` argument to be set, please change it to read from `os.environ['LOCAL_RANK']` instead.
See https://pytorch.org/docs/stable/distributed.html#launch-utility for further instructions main() W0621 21:10:03.403000 3414171 site-packages/torch/distributed/run.py:766] W0621 21:10:03.403000 3414171 site-packages/torch/distributed/run.py:766] ***************************************** W0621 21:10:03.403000 3414171 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. W0621 21:10:03.403000 3414171 site-packages/torch/distributed/run.py:766] ***************************************** /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated and will be removed in future. Use torchrun. Note that --use-env is set by default in torchrun. If your script expects `--local-rank` argument to be set, please change it to read from `os.environ['LOCAL_RANK']` instead. See https://pytorch.org/docs/stable/distributed.html#launch-utility for further instructions main() W0621 21:10:03.410000 3345557 site-packages/torch/distributed/run.py:766] W0621 21:10:03.410000 3345557 site-packages/torch/distributed/run.py:766] ***************************************** W0621 21:10:03.410000 3345557 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. W0621 21:10:03.410000 3345557 site-packages/torch/distributed/run.py:766] ***************************************** [rank16]:[W621 21:10:26.960639601 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 16] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank8]:[W621 21:10:26.533719904 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 8] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank0]:[W621 21:10:26.693435126 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 0] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank4]:[W621 21:10:26.013881083 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 4] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank12]:[W621 21:10:26.902315100 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 12] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank20]:[W621 21:10:26.355591684 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 20] using GPU 4 as device used by this process is currently unknown. 
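Both launcher warnings above point at the same fix on the worker side. A minimal sketch, assuming a recent PyTorch where init_process_group() accepts device_id (the variable names here are hypothetical and not taken from pretrain_gpt_profile.py):

import os
import torch
import torch.distributed as dist

# torchrun / torch.distributed.run export LOCAL_RANK for every worker,
# so the script no longer needs a --local-rank command-line argument.
local_rank = int(os.environ["LOCAL_RANK"])
device = torch.device("cuda", local_rank)
torch.cuda.set_device(device)

# Passing device_id pins this rank to its GPU up front, which is what the
# ProcessGroupNCCL warning asks for.
dist.init_process_group(backend="nccl", device_id=device)

Launching the same script with torchrun instead of the deprecated python3 -m torch.distributed.launch would silence the FutureWarning as well.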
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
  warnings.warn(
[The same UserWarning is repeated verbatim dozens of times across the ranks.]
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
  warnings.warn(
[The same DeprecationWarning is likewise repeated verbatim dozens of times across the ranks.]
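Because every training process re-emits these two messages verbatim, the combined log grows by dozens of identical lines per run. If suppressing them is acceptable, a small, hypothetical filter using Python's standard warnings module (message patterns copied from the log above) could be installed early in the worker script:

import warnings

# Hide the Transformer Engine cpu_offload DeprecationWarning and the
# Megatron-Core fp8 UserWarning shown above; other warnings still surface.
warnings.filterwarnings(
    "ignore",
    message=r"Offloading weights is deprecated.*",
    category=DeprecationWarning,
)
warnings.filterwarnings(
    "ignore",
    message=r'The fp8 argument in "get_gpt_layer_with_transformer_engine_spec".*',
    category=UserWarning,
)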
[rank11]: Traceback (most recent call last):
[rank11]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
[rank11]:     pretrain(
[rank11]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain
[rank11]:     iteration, num_floating_point_operations_so_far = train(
[rank11]:                                                       ^^^^^^
[rank11]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train
[rank11]:     ) = train_step(
[rank11]:         ^^^^^^^^^^^
[rank11]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step
[rank11]:     losses_reduced = forward_backward_func(
[rank11]:                      ^^^^^^^^^^^^^^^^^^^^^^
[rank11]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining
[rank11]:     output_tensor, num_tokens = forward_step(
[rank11]:                                 ^^^^^^^^^^^^^
[rank11]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step
[rank11]:     output_tensor, loss_func = forward_step_func(data_iterator, model)
[rank11]:                                ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
[rank11]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step
[rank11]:     (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)
[rank11]:                                                                             ^^^^^^^^^^^^^^^^^^^^^^^^
[rank11]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 293, in get_batch
[rank11]:     batch = get_batch_on_this_cp_rank(batch)
[rank11]:             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
[rank11]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/utils.py", line 1765, in get_batch_on_this_cp_rank
[rank11]:     val = val.index_select(seq_dim, index)
[rank11]:           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
[rank11]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 32.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 9.62 GiB is free. Including non-PyTorch memory, this process has 130.18 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 11.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
[Ranks 1, 2, 4, 5, 10, 12, 13, 18, 19, 20, 26, 27, 29, and 31 report the same traceback, interleaved with this one, through get_batch -> get_batch_on_this_cp_rank -> val.index_select(seq_dim, index), ending in the same torch.OutOfMemoryError; only the rank number, GPU index, and exact memory figures (roughly 9.6 GiB free of 139.81 GiB, about 130 GiB in use) differ.]
^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank13]: losses_reduced = forward_backward_func( [rank13]: ^^^^^^^^^^^^^^^^^^^^^^ [rank27]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 32.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 9.64 GiB is free. Including non-PyTorch memory, this process has 130.16 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 11.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/utils.py", line 1765, in get_batch_on_this_cp_rank [rank1]: val = val.index_select(seq_dim, index) [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 32.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 9.63 GiB is free. Including non-PyTorch memory, this process has 130.18 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 11.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank13]: output_tensor, num_tokens = forward_step( [rank13]: ^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank13]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank13]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: Traceback (most recent call last): [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank6]: pretrain( [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank6]: iteration, num_floating_point_operations_so_far = train( [rank6]: ^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank6]: ) = train_step( [rank6]: ^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank6]: losses_reduced = forward_backward_func( [rank6]: ^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 293, in get_batch [rank13]: batch = get_batch_on_this_cp_rank(batch) [rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank13]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/utils.py", line 1765, in get_batch_on_this_cp_rank [rank13]: val = val.index_select(seq_dim, index) [rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank17]: Traceback (most recent call last): [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank17]: pretrain( [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank17]: iteration, num_floating_point_operations_so_far = train( [rank17]: ^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank17]: ) = train_step( [rank17]: ^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank17]: losses_reduced = forward_backward_func( [rank17]: ^^^^^^^^^^^^^^^^^^^^^^ [rank6]: output_tensor, num_tokens = forward_step( [rank6]: ^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank6]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank6]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 293, in get_batch [rank6]: batch = get_batch_on_this_cp_rank(batch) [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank13]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 32.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 9.62 GiB is free. Including non-PyTorch memory, this process has 130.18 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 11.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank17]: output_tensor, num_tokens = forward_step( [rank17]: ^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank17]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank17]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank17]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank17]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/utils.py", line 1765, in get_batch_on_this_cp_rank [rank6]: val = val.index_select(seq_dim, index) [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 32.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 9.64 GiB is free. Including non-PyTorch memory, this process has 130.16 GiB memory in use. 
Of the allocated memory 128.65 GiB is allocated by PyTorch, and 11.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 293, in get_batch [rank17]: batch = get_batch_on_this_cp_rank(batch) [rank17]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/utils.py", line 1765, in get_batch_on_this_cp_rank [rank17]: val = val.index_select(seq_dim, index) [rank17]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: Traceback (most recent call last): [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank30]: pretrain( [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank30]: iteration, num_floating_point_operations_so_far = train( [rank30]: ^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank30]: ) = train_step( [rank30]: ^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank30]: losses_reduced = forward_backward_func( [rank30]: ^^^^^^^^^^^^^^^^^^^^^^ [rank3]: Traceback (most recent call last): [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank3]: pretrain( [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank3]: iteration, num_floating_point_operations_so_far = train( [rank3]: ^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank3]: ) = train_step( [rank3]: ^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank3]: losses_reduced = forward_backward_func( [rank3]: ^^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank17]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 32.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 9.61 GiB is free. Including non-PyTorch memory, this process has 130.20 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 11.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank30]: output_tensor, num_tokens = forward_step( [rank30]: ^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank30]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank30]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank3]: output_tensor, num_tokens = forward_step( [rank3]: ^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank3]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank3]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 293, in get_batch [rank3]: batch = get_batch_on_this_cp_rank(batch) [rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: Traceback (most recent call last): [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank16]: pretrain( [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank16]: iteration, num_floating_point_operations_so_far = train( [rank16]: ^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank16]: ) = train_step( [rank16]: ^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank16]: losses_reduced = forward_backward_func( [rank16]: ^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 293, in get_batch [rank30]: batch = get_batch_on_this_cp_rank(batch) [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/utils.py", line 1765, in get_batch_on_this_cp_rank [rank30]: val = val.index_select(seq_dim, index) [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/utils.py", line 1765, in get_batch_on_this_cp_rank [rank3]: val = val.index_select(seq_dim, index) [rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank3]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 32.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 9.63 GiB is free. Including non-PyTorch memory, this process has 130.18 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 11.51 MiB is reserved by PyTorch but unallocated. 
If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank9]: Traceback (most recent call last): [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank9]: pretrain( [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank9]: iteration, num_floating_point_operations_so_far = train( [rank9]: ^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank9]: ) = train_step( [rank9]: ^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank9]: losses_reduced = forward_backward_func( [rank9]: ^^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank16]: output_tensor, num_tokens = forward_step( [rank16]: ^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank16]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank16]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 32.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 9.63 GiB is free. Including non-PyTorch memory, this process has 130.18 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 11.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank9]: output_tensor, num_tokens = forward_step( [rank9]: ^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank9]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank9]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 293, in get_batch [rank9]: batch = get_batch_on_this_cp_rank(batch) [rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 293, in get_batch [rank16]: batch = get_batch_on_this_cp_rank(batch) [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/utils.py", line 1765, in get_batch_on_this_cp_rank [rank16]: val = val.index_select(seq_dim, index) [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: Traceback (most recent call last): [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank25]: pretrain( [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank25]: iteration, num_floating_point_operations_so_far = train( [rank25]: ^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank25]: ) = train_step( [rank25]: ^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank25]: losses_reduced = forward_backward_func( [rank25]: ^^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/utils.py", line 1765, in get_batch_on_this_cp_rank [rank9]: val = val.index_select(seq_dim, index) [rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank9]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 32.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 9.62 GiB is free. Including non-PyTorch memory, this process has 130.18 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 11.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank16]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 32.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 9.62 GiB is free. Including non-PyTorch memory, this process has 130.18 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 11.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank25]: output_tensor, num_tokens = forward_step( [rank25]: ^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank25]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank25]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 293, in get_batch [rank25]: batch = get_batch_on_this_cp_rank(batch) [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/utils.py", line 1765, in get_batch_on_this_cp_rank [rank25]: val = val.index_select(seq_dim, index) [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: Traceback (most recent call last): [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank0]: pretrain( [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank0]: iteration, num_floating_point_operations_so_far = train( [rank0]: ^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank0]: ) = train_step( [rank0]: ^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank0]: losses_reduced = forward_backward_func( [rank0]: ^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank25]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 32.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 9.64 GiB is free. Including non-PyTorch memory, this process has 130.16 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 11.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank0]: output_tensor, num_tokens = forward_step( [rank0]: ^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank0]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank0]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 293, in get_batch [rank0]: batch = get_batch_on_this_cp_rank(batch) [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/utils.py", line 1765, in get_batch_on_this_cp_rank [rank0]: val = val.index_select(seq_dim, index) [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 32.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 9.64 GiB is free. Including non-PyTorch memory, this process has 130.16 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 11.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank14]: Traceback (most recent call last): [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank14]: pretrain( [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank14]: iteration, num_floating_point_operations_so_far = train( [rank14]: ^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank14]: ) = train_step( [rank14]: ^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank14]: losses_reduced = forward_backward_func( [rank14]: ^^^^^^^^^^^^^^^^^^^^^^ [rank22]: Traceback (most recent call last): [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank22]: pretrain( [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank22]: iteration, num_floating_point_operations_so_far = train( [rank22]: ^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank22]: ) = train_step( [rank22]: ^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank22]: losses_reduced = forward_backward_func( [rank22]: ^^^^^^^^^^^^^^^^^^^^^^ [rank7]: Traceback (most recent call last): [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank7]: pretrain( [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank7]: iteration, 
num_floating_point_operations_so_far = train( [rank7]: ^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank7]: ) = train_step( [rank7]: ^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank7]: losses_reduced = forward_backward_func( [rank7]: ^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank14]: output_tensor, num_tokens = forward_step( [rank14]: ^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank14]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank14]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank22]: output_tensor, num_tokens = forward_step( [rank22]: ^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank22]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank22]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank22]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank22]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank7]: output_tensor, num_tokens = forward_step( [rank7]: ^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank7]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank7]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 293, in get_batch [rank7]: batch = get_batch_on_this_cp_rank(batch) [rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 293, in get_batch [rank14]: batch = get_batch_on_this_cp_rank(batch) [rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/utils.py", line 1765, in get_batch_on_this_cp_rank [rank14]: val = val.index_select(seq_dim, index) [rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 293, in get_batch [rank22]: batch = get_batch_on_this_cp_rank(batch) [rank22]: 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/utils.py", line 1765, in get_batch_on_this_cp_rank [rank22]: val = val.index_select(seq_dim, index) [rank22]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: Traceback (most recent call last): [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank28]: pretrain( [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank28]: iteration, num_floating_point_operations_so_far = train( [rank28]: ^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank28]: ) = train_step( [rank28]: ^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank28]: losses_reduced = forward_backward_func( [rank28]: ^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/utils.py", line 1765, in get_batch_on_this_cp_rank [rank7]: val = val.index_select(seq_dim, index) [rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank7]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 32.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 9.63 GiB is free. Including non-PyTorch memory, this process has 130.18 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 11.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank14]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 32.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 9.61 GiB is free. Including non-PyTorch memory, this process has 130.20 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 11.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank22]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 32.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 9.62 GiB is free. Including non-PyTorch memory, this process has 130.18 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 11.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank28]: output_tensor, num_tokens = forward_step( [rank28]: ^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank28]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank28]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: Traceback (most recent call last): [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank15]: pretrain( [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank15]: iteration, num_floating_point_operations_so_far = train( [rank15]: ^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank15]: ) = train_step( [rank15]: ^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank15]: losses_reduced = forward_backward_func( [rank15]: ^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 293, in get_batch [rank28]: batch = get_batch_on_this_cp_rank(batch) [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/utils.py", line 1765, in get_batch_on_this_cp_rank [rank28]: val = val.index_select(seq_dim, index) [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank15]: output_tensor, num_tokens = forward_step( [rank15]: ^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank15]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank15]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 32.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 9.63 GiB is free. Including non-PyTorch memory, this process has 130.18 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 11.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 293, in get_batch [rank15]: batch = get_batch_on_this_cp_rank(batch) [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/utils.py", line 1765, in get_batch_on_this_cp_rank [rank15]: val = val.index_select(seq_dim, index) [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: Traceback (most recent call last): [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank24]: pretrain( [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank24]: iteration, num_floating_point_operations_so_far = train( [rank24]: ^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank24]: ) = train_step( [rank24]: ^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank24]: losses_reduced = forward_backward_func( [rank24]: ^^^^^^^^^^^^^^^^^^^^^^ [rank15]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 32.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 9.62 GiB is free. Including non-PyTorch memory, this process has 130.18 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 11.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank23]: Traceback (most recent call last): [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank23]: pretrain( [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank23]: iteration, num_floating_point_operations_so_far = train( [rank23]: ^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank23]: ) = train_step( [rank23]: ^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank23]: losses_reduced = forward_backward_func( [rank23]: ^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank24]: output_tensor, num_tokens = forward_step( [rank24]: ^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank24]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank24]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: Traceback (most recent call last): [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank8]: 
pretrain( [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank8]: iteration, num_floating_point_operations_so_far = train( [rank8]: ^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank8]: ) = train_step( [rank8]: ^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank8]: losses_reduced = forward_backward_func( [rank8]: ^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank23]: output_tensor, num_tokens = forward_step( [rank23]: ^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank23]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank23]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 293, in get_batch [rank24]: batch = get_batch_on_this_cp_rank(batch) [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/utils.py", line 1765, in get_batch_on_this_cp_rank [rank24]: val = val.index_select(seq_dim, index) [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: output_tensor, num_tokens = forward_step( [rank8]: ^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank8]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank8]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 293, in get_batch [rank8]: batch = get_batch_on_this_cp_rank(batch) [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 293, in get_batch [rank23]: batch = get_batch_on_this_cp_rank(batch) [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/utils.py", line 1765, in get_batch_on_this_cp_rank [rank23]: val = val.index_select(seq_dim, index) [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 32.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 9.63 GiB is free. Including non-PyTorch memory, this process has 130.18 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 11.51 MiB is reserved by PyTorch but unallocated. 
If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/utils.py", line 1765, in get_batch_on_this_cp_rank [rank8]: val = val.index_select(seq_dim, index) [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 32.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 9.61 GiB is free. Including non-PyTorch memory, this process has 130.20 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 11.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank23]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 32.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 9.61 GiB is free. Including non-PyTorch memory, this process has 130.20 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 11.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank21]: Traceback (most recent call last): [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank21]: pretrain( [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank21]: iteration, num_floating_point_operations_so_far = train( [rank21]: ^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank21]: ) = train_step( [rank21]: ^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank21]: losses_reduced = forward_backward_func( [rank21]: ^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank21]: output_tensor, num_tokens = forward_step( [rank21]: ^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank21]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank21]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 293, in get_batch [rank21]: batch = get_batch_on_this_cp_rank(batch) [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/utils.py", line 1765, in get_batch_on_this_cp_rank [rank21]: val = val.index_select(seq_dim, index) [rank21]: 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 32.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 9.61 GiB is free. Including non-PyTorch memory, this process has 130.20 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 11.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank4]:[W621 21:10:36.267763226 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank3]:[W621 21:10:36.384862722 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank2]:[W621 21:10:36.454267453 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank5]:[W621 21:10:36.483016941 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank7]:[W621 21:10:36.542959595 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank6]:[W621 21:10:36.546964871 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank1]:[W621 21:10:36.548040228 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank19]:[W621 21:10:36.890417352 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank18]:[W621 21:10:36.907762679 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank27]:[W621 21:10:36.989340512 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank22]:[W621 21:10:36.925612925 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. 
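The failing call is Megatron's context-parallel batch split: get_batch_on_this_cp_rank keeps only the local CP rank's slice of every batch tensor, and torch.Tensor.index_select materializes a new contiguous tensor rather than returning a view, so each slice is an extra copy on top of the batch already resident on the GPU. A minimal sketch of the pattern, with hypothetical shapes and a plain even split (not Megatron's actual chunking), looks like this:

import torch

def slice_batch_for_cp_rank(batch, cp_rank, cp_size, seq_dim=1):
    # Hypothetical stand-in for get_batch_on_this_cp_rank: keep only this
    # context-parallel rank's chunk of each per-token tensor.
    out = {}
    for key, val in batch.items():
        chunk = val.size(seq_dim) // cp_size
        index = torch.arange(cp_rank * chunk, (cp_rank + 1) * chunk, device=val.device)
        # index_select allocates a fresh contiguous tensor on the same device;
        # for a large tensor such as a full attention mask this is where a
        # tens-of-GiB allocation request can come from.
        out[key] = val.index_select(seq_dim, index)
    return out

# Example: a (1, 8192, 8192) bool attention mask split across 4 CP ranks.
batch = {"attention_mask": torch.zeros(1, 8192, 8192, dtype=torch.bool)}
print(slice_batch_for_cp_rank(batch, cp_rank=0, cp_size=4)["attention_mask"].shape)
# torch.Size([1, 2048, 8192])

Note that only 11.51 MiB is reserved but unallocated in the failures above, so the PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True hint in the message targets fragmentation that does not appear to be present here; the 32.00 GiB request simply exceeds the roughly 9.6 GiB still free on each GPU.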
[rank4]:[W621 21:10:36.267763226 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
The same ProcessGroupNCCL.cpp:1476 warning was emitted around 21:10:36 by ranks 1-3, 5-7, 9-15, 17-23 and 25-31.
site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2044943 closing signal SIGTERM W0621 21:10:37.829000 2044864 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2044944 closing signal SIGTERM W0621 21:10:37.830000 2044864 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2044945 closing signal SIGTERM W0621 21:10:37.830000 2044864 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2044947 closing signal SIGTERM W0621 21:10:37.830000 2044864 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2044948 closing signal SIGTERM W0621 21:10:38.038000 3414171 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3414247 closing signal SIGTERM W0621 21:10:38.041000 3414171 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3414248 closing signal SIGTERM W0621 21:10:38.041000 3414171 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3414251 closing signal SIGTERM W0621 21:10:38.042000 3414171 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3414254 closing signal SIGTERM E0621 21:10:38.248000 125170 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 4 (pid: 125253) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 Traceback (most recent call last): File "", line 198, in _run_module_as_main File "", line 88, in _run_code File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in main() File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper return arg(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main launch(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch run(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run elastic_launch( File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__ return launch_agent(self._config, self._entrypoint, list(args)) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent raise ChildFailedError( torch.distributed.elastic.multiprocessing.errors.ChildFailedError: ============================================================ ./pretrain_gpt_profile.py FAILED ------------------------------------------------------------ Failures: ------------------------------------------------------------ Root Cause (first observed failure): [0]: time : 2025-06-21_21:10:37 host : fs-mbz-gpu-852 rank : 4 (local_rank: 4) exitcode : 1 (pid: 125253) error_file: traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html ============================================================ + set +x E0621 21:10:38.747000 2044864 
site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 5 (pid: 2044946) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 W0621 21:10:38.759000 2044864 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-901_2044864_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. W0621 21:10:38.761000 3345557 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1341] The node 'fs-mbz-gpu-881_3345557_0' has failed to send a keep-alive heartbeat to the rendezvous '343200' due to an error of type RendezvousConnectionError. [W621 21:10:38.198952574 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-901]:53012, remote=[fs-mbz-gpu-852]:29500): Broken pipe Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x14b6259785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x14b60ec5aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa358 (0x14b60ec5c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5babb3e (0x14b60ec5db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: c10d::TCPStore::doWait(c10::ArrayRef, std::allocator > >, std::chrono::duration >) + 0x1a6 (0x14b60ec57ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string, std::allocator > const&) + 0x33 (0x14b60ec57ea3 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #6: c10d::TCPStore::get(std::__cxx11::basic_string, std::allocator > const&) + 0xab (0x14b60ec58f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #7: + 0xc0f526 (0x14b61df8b526 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #8: + 0x37f17d (0x14b61d6fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #26: + 0x29d90 (0x14b626d0dd90 in /lib/x86_64-linux-gnu/libc.so.6) frame #27: __libc_start_main + 0x80 (0x14b626d0de40 in /lib/x86_64-linux-gnu/libc.so.6) W0621 21:10:38.771000 2044864 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-901_2044864_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. 
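The rank21 OOM above already names one mitigation: setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True when reserved-but-unallocated memory is large. A minimal sketch, assuming the variable is applied before the first CUDA allocation in ./pretrain_gpt_profile.py (it could equally be exported in the launch script before srun):

import os

# Must be in place before the CUDA caching allocator is first used.
os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "expandable_segments:True")

import torch  # imported after the env var so the allocator sees it

Whether this helps here is uncertain: the process was already using 130.20 GiB of a 139.81 GiB GPU, so a 32.00 GiB activation request likely needs a shorter context length or more parallelism rather than defragmentation alone.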
[W621 21:10:38.210188927 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-901]:53012, remote=[fs-mbz-gpu-852]:29500): Broken pipe Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x14b6259785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x14b60ec5aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa358 (0x14b60ec5c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5babb3e (0x14b60ec5db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: c10d::TCPStore::doWait(c10::ArrayRef, std::allocator > >, std::chrono::duration >) + 0x1a6 (0x14b60ec57ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string, std::allocator > const&) + 0x33 (0x14b60ec57ea3 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #6: c10d::TCPStore::get(std::__cxx11::basic_string, std::allocator > const&) + 0xab (0x14b60ec58f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #7: + 0xc0f526 (0x14b61df8b526 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #8: + 0x37f17d (0x14b61d6fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #26: + 0x29d90 (0x14b626d0dd90 in /lib/x86_64-linux-gnu/libc.so.6) frame #27: __libc_start_main + 0x80 (0x14b626d0de40 in /lib/x86_64-linux-gnu/libc.so.6) W0621 21:10:38.781000 2044864 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-901_2044864_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. 
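The repeated ProcessGroupNCCL warnings above all point at the same fix: call destroy_process_group() before the trainer exits. A minimal sketch, assuming the entry point owns the process-group lifetime (names here are illustrative, not the actual structure of pretrain_gpt_profile.py):

import torch.distributed as dist

def main():
    dist.init_process_group(backend="nccl")
    try:
        pass  # training / profiling loop goes here
    finally:
        # Explicit shutdown avoids the "destroy_process_group() was not called"
        # warning and releases NCCL resources even when a rank raises
        # (for example, the OOM above).
        if dist.is_initialized():
            dist.destroy_process_group()

if __name__ == "__main__":
    main()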
Traceback (most recent call last): File "", line 198, in _run_module_as_main File "", line 88, in _run_code File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in main() File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper return arg(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main launch(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch run(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run elastic_launch( File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__ return launch_agent(self._config, self._entrypoint, list(args)) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent raise ChildFailedError( torch.distributed.elastic.multiprocessing.errors.ChildFailedError: ============================================================ ./pretrain_gpt_profile.py FAILED ------------------------------------------------------------ Failures: ------------------------------------------------------------ Root Cause (first observed failure): [0]: time : 2025-06-21_21:10:37 host : fs-mbz-gpu-901 rank : 29 (local_rank: 5) exitcode : 1 (pid: 2044946) error_file: traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html ============================================================ E0621 21:10:38.831000 3345557 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 2 (pid: 3345637) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 [W621 21:10:38.201313248 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-881]:56088, remote=[fs-mbz-gpu-852]:29500): Broken pipe Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x151f1d3785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x151f0665aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa358 (0x151f0665c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5babb3e (0x151f0665db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: c10d::TCPStore::doWait(c10::ArrayRef, std::allocator > >, std::chrono::duration >) + 0x1a6 (0x151f06657ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string, std::allocator > const&) + 0x33 (0x151f06657ea3 in 
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #6: c10d::TCPStore::get(std::__cxx11::basic_string, std::allocator > const&) + 0xab (0x151f06658f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #7: + 0xc0f526 (0x151f1598b526 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #8: + 0x37f17d (0x151f150fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #26: + 0x29d90 (0x151f1e6a9d90 in /lib/x86_64-linux-gnu/libc.so.6) frame #27: __libc_start_main + 0x80 (0x151f1e6a9e40 in /lib/x86_64-linux-gnu/libc.so.6) W0621 21:10:38.844000 3345557 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-881_3345557_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. [W621 21:10:38.215801717 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-881]:56088, remote=[fs-mbz-gpu-852]:29500): Broken pipe Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x151f1d3785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x151f0665aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa358 (0x151f0665c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5babb3e (0x151f0665db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: c10d::TCPStore::doWait(c10::ArrayRef, std::allocator > >, std::chrono::duration >) + 0x1a6 (0x151f06657ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string, std::allocator > const&) + 0x33 (0x151f06657ea3 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #6: c10d::TCPStore::get(std::__cxx11::basic_string, std::allocator > const&) + 0xab (0x151f06658f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #7: + 0xc0f526 (0x151f1598b526 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #8: + 0x37f17d (0x151f150fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #26: + 0x29d90 (0x151f1e6a9d90 in /lib/x86_64-linux-gnu/libc.so.6) frame #27: __libc_start_main + 0x80 (0x151f1e6a9e40 in /lib/x86_64-linux-gnu/libc.so.6) W0621 21:10:38.855000 3345557 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-881_3345557_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. 
E0621 21:10:38.857000 3414171 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 2 (pid: 3414249) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 [W621 21:10:38.225875491 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-881]:56088, remote=[fs-mbz-gpu-852]:29500): Broken pipe Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x151f1d3785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x151f0665aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa358 (0x151f0665c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5babb3e (0x151f0665db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: c10d::TCPStore::doWait(c10::ArrayRef, std::allocator > >, std::chrono::duration >) + 0x1a6 (0x151f06657ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string, std::allocator > const&) + 0x33 (0x151f06657ea3 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #6: c10d::TCPStore::get(std::__cxx11::basic_string, std::allocator > const&) + 0xab (0x151f06658f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #7: + 0xc0f526 (0x151f1598b526 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #8: + 0x37f17d (0x151f150fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #26: + 0x29d90 (0x151f1e6a9d90 in /lib/x86_64-linux-gnu/libc.so.6) frame #27: __libc_start_main + 0x80 (0x151f1e6a9e40 in /lib/x86_64-linux-gnu/libc.so.6) W0621 21:10:38.865000 3345557 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-881_3345557_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. 
Traceback (most recent call last): File "", line 198, in _run_module_as_main File "", line 88, in _run_code File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in main() File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper return arg(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main launch(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch run(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run elastic_launch( File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__ return launch_agent(self._config, self._entrypoint, list(args)) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent W0621 21:10:38.869000 3414171 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-870_3414171_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. raise ChildFailedError( torch.distributed.elastic.multiprocessing.errors.ChildFailedError: ============================================================ ./pretrain_gpt_profile.py FAILED ------------------------------------------------------------ Failures: ------------------------------------------------------------ Root Cause (first observed failure): [0]: time : 2025-06-21_21:10:37 host : fs-mbz-gpu-881 rank : 18 (local_rank: 2) exitcode : 1 (pid: 3345637) error_file: traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html ============================================================ [W621 21:10:38.787614233 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-870]:50916, remote=[fs-mbz-gpu-852]:29500): Broken pipe Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x146b173785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x146b0025aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa358 (0x146b0025c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5babb3e (0x146b0025db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: c10d::TCPStore::doWait(c10::ArrayRef, std::allocator > >, std::chrono::duration >) + 0x1a6 (0x146b00257ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string, std::allocator > const&) + 0x33 
(0x146b00257ea3 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #6: c10d::TCPStore::get(std::__cxx11::basic_string, std::allocator > const&) + 0xab (0x146b00258f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #7: + 0xc0f526 (0x146b0f58b526 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #8: + 0x37f17d (0x146b0ecfb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #26: + 0x29d90 (0x146b18392d90 in /lib/x86_64-linux-gnu/libc.so.6) frame #27: __libc_start_main + 0x80 (0x146b18392e40 in /lib/x86_64-linux-gnu/libc.so.6) W0621 21:10:38.881000 3414171 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-870_3414171_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. [W621 21:10:38.799356450 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-870]:50916, remote=[fs-mbz-gpu-852]:29500): Broken pipe Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x146b173785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x146b0025aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa358 (0x146b0025c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5babb3e (0x146b0025db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: c10d::TCPStore::doWait(c10::ArrayRef, std::allocator > >, std::chrono::duration >) + 0x1a6 (0x146b00257ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string, std::allocator > const&) + 0x33 (0x146b00257ea3 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #6: c10d::TCPStore::get(std::__cxx11::basic_string, std::allocator > const&) + 0xab (0x146b00258f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #7: + 0xc0f526 (0x146b0f58b526 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #8: + 0x37f17d (0x146b0ecfb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #26: + 0x29d90 (0x146b18392d90 in /lib/x86_64-linux-gnu/libc.so.6) frame #27: __libc_start_main + 0x80 (0x146b18392e40 in /lib/x86_64-linux-gnu/libc.so.6) W0621 21:10:38.891000 3414171 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-870_3414171_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. 
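Each failure summary in this log ends with "traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html". Following that reference, the usual approach is to decorate the script's entry point so the failing rank's Python traceback is propagated into the elastic error report; a sketch, assuming main() is the entry point of ./pretrain_gpt_profile.py:

from torch.distributed.elastic.multiprocessing.errors import record

@record  # records the exception so the launcher's summary shows the real traceback
def main():
    ...

if __name__ == "__main__":
    main()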
Traceback (most recent call last): File "", line 198, in _run_module_as_main File "", line 88, in _run_code File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in main() File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper return arg(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main launch(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch run(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run elastic_launch( File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__ return launch_agent(self._config, self._entrypoint, list(args)) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent raise ChildFailedError( torch.distributed.elastic.multiprocessing.errors.ChildFailedError: ============================================================ ./pretrain_gpt_profile.py FAILED ------------------------------------------------------------ Failures: [1]: time : 2025-06-21_21:10:38 host : fs-mbz-gpu-870 rank : 11 (local_rank: 3) exitcode : 1 (pid: 3414250) error_file: traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html [2]: time : 2025-06-21_21:10:38 host : fs-mbz-gpu-870 rank : 13 (local_rank: 5) exitcode : 1 (pid: 3414252) error_file: traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html [3]: time : 2025-06-21_21:10:38 host : fs-mbz-gpu-870 rank : 14 (local_rank: 6) exitcode : 1 (pid: 3414253) error_file: traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html ------------------------------------------------------------ Root Cause (first observed failure): [0]: time : 2025-06-21_21:10:38 host : fs-mbz-gpu-870 rank : 10 (local_rank: 2) exitcode : 1 (pid: 3414249) error_file: traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html ============================================================ + set +x + set +x + set +x + for ctx_length in 1024 2048 4096 8192 12288 16384 24576 32768 40960 49152 65536 81920 98304 131072 + export PROF_CTX_LENGTH=4096 + PROF_CTX_LENGTH=4096 + name='/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L4096*tp8.cp4.bs32.json' + '[' -f '/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L4096*tp8.cp4.bs32.json' ']' + echo 'Running ctx_length=4096, TP_SIZE=8, CP_SIZE=4, BATCH_SIZE=32' + srun bash ./attnserver.sh + which python3 + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 3 --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 4096 --max-position-embeddings 4096 --micro-batch-size 1 
--global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/ + which python3 + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 2 --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 4096 --max-position-embeddings 4096 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/ + which python3 + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 0 --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 4096 --max-position-embeddings 4096 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/ + which python3 + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 1 --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 4096 --max-position-embeddings 4096 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated and will be removed in future. Use torchrun. Note that --use-env is set by default in torchrun. If your script expects `--local-rank` argument to be set, please change it to read from `os.environ['LOCAL_RANK']` instead. 
See https://pytorch.org/docs/stable/distributed.html#launch-utility for further instructions main() W0621 21:10:41.620000 127109 site-packages/torch/distributed/run.py:766] W0621 21:10:41.620000 127109 site-packages/torch/distributed/run.py:766] ***************************************** W0621 21:10:41.620000 127109 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. W0621 21:10:41.620000 127109 site-packages/torch/distributed/run.py:766] ***************************************** /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated and will be removed in future. Use torchrun. Note that --use-env is set by default in torchrun. If your script expects `--local-rank` argument to be set, please change it to read from `os.environ['LOCAL_RANK']` instead. See https://pytorch.org/docs/stable/distributed.html#launch-utility for further instructions main() W0621 21:10:41.820000 2046747 site-packages/torch/distributed/run.py:766] W0621 21:10:41.820000 2046747 site-packages/torch/distributed/run.py:766] ***************************************** W0621 21:10:41.820000 2046747 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. W0621 21:10:41.820000 2046747 site-packages/torch/distributed/run.py:766] ***************************************** /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated and will be removed in future. Use torchrun. Note that --use-env is set by default in torchrun. If your script expects `--local-rank` argument to be set, please change it to read from `os.environ['LOCAL_RANK']` instead. See https://pytorch.org/docs/stable/distributed.html#launch-utility for further instructions main() W0621 21:10:41.824000 3347459 site-packages/torch/distributed/run.py:766] W0621 21:10:41.824000 3347459 site-packages/torch/distributed/run.py:766] ***************************************** W0621 21:10:41.824000 3347459 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. W0621 21:10:41.824000 3347459 site-packages/torch/distributed/run.py:766] ***************************************** /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated and will be removed in future. Use torchrun. Note that --use-env is set by default in torchrun. If your script expects `--local-rank` argument to be set, please change it to read from `os.environ['LOCAL_RANK']` instead. 
See https://pytorch.org/docs/stable/distributed.html#launch-utility for further instructions main() W0621 21:10:41.886000 3416130 site-packages/torch/distributed/run.py:766] W0621 21:10:41.886000 3416130 site-packages/torch/distributed/run.py:766] ***************************************** W0621 21:10:41.886000 3416130 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. W0621 21:10:41.886000 3416130 site-packages/torch/distributed/run.py:766] ***************************************** [rank28]:[W621 21:11:05.186207716 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 28] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank30]:[W621 21:11:05.186659326 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 30] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank25]:[W621 21:11:05.186921394 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 25] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank29]:[W621 21:11:05.186945587 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 29] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank31]:[W621 21:11:05.186980322 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 31] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank5]:[W621 21:11:05.779494646 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 5] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank6]:[W621 21:11:05.779509318 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 6] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank4]:[W621 21:11:05.779519110 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 4] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank1]:[W621 21:11:05.779577683 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 1] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. 
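The ProcessGroupNCCL warnings above ("using GPU N as device used by this process is currently unknown") suggest passing device_id to init_process_group(). A sketch, assuming a recent PyTorch release where init_process_group() accepts device_id and that LOCAL_RANK maps one-to-one onto local GPUs as in this run:

import os
import torch
import torch.distributed as dist

local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
# Binding the process group to a device removes the unknown-mapping warning
# and the hang risk it describes.
dist.init_process_group(backend="nccl",
                        device_id=torch.device(f"cuda:{local_rank}"))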
[rank15]:[W621 21:11:05.666844633 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 15] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank9]:[W621 21:11:05.666843268 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 9] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank22]:[W621 21:11:05.119950127 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 22] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank17]:[W621 21:11:05.119963523 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 17] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank19]:[W621 21:11:05.119971930 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 19] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank7]:[W621 21:11:05.779585545 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 7] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank12]:[W621 21:11:05.667099883 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 12] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank20]:[W621 21:11:05.120027168 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 20] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank23]:[W621 21:11:05.120050056 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 23] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank21]:[W621 21:11:05.120083980 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 21] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank3]:[W621 21:11:05.779656837 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 3] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. 
[rank13]:[W621 21:11:05.667143483 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 13] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank11]:[W621 21:11:05.667169347 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 11] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank27]:[W621 21:11:05.192775608 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 27] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank14]:[W621 21:11:05.674110814 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 14] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank8]:[W621 21:11:06.887806372 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 8] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank0]:[W621 21:11:06.023361967 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 0] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank24]:[W621 21:11:07.458022525 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 24] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank2]:[W621 21:11:07.071013797 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 2] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank10]:[W621 21:11:07.958673192 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 10] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank26]:[W621 21:11:07.480856266 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 26] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank18]:[W621 21:11:07.420527501 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 18] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. 
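The launcher's FutureWarning earlier in this block notes that torch.distributed.launch is deprecated in favor of torchrun, and that scripts expecting a --local-rank argument should read os.environ['LOCAL_RANK'] instead. A sketch of that argument handling, assuming the script still wants an args.local_rank field for compatibility:

import argparse
import os

parser = argparse.ArgumentParser()
# torchrun (and --use-env) sets LOCAL_RANK in the environment;
# --local-rank is no longer passed on the command line.
parser.add_argument("--local-rank", type=int,
                    default=int(os.environ.get("LOCAL_RANK", 0)))
args = parser.parse_args()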
[rank16]:[W621 21:11:07.473087277 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 16] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. 
warnings.warn(
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
  warnings.warn(
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
  warnings.warn(
(these two messages recur across ranks)
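Neither message is fatal: the fp8 argument warning asks for a code update, and the cpu_offload one says offload_weights=True no longer does anything. Because each rank re-emits them, they account for most of the noise in this stretch of the log. If they get in the way of reading profiler output, the standard-library warnings filter can drop them; the sketch below is only a suggestion, with the message patterns copied from the log above, and the place to install it (e.g. early in pretrain_gpt_profile.py, before model construction) is an assumption rather than something shown here.

    import warnings

    # Silence the two deprecation messages quoted above. The "message" argument
    # is a regex matched against the start of the warning text.
    warnings.filterwarnings(
        "ignore",
        message=r'The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated',
        category=UserWarning,
    )
    warnings.filterwarnings(
        "ignore",
        message=r"Offloading weights is deprecated",
        category=DeprecationWarning,
    )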
[rank3]: Traceback (most recent call last):
[rank3]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
[rank3]:     pretrain(
[rank3]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain
[rank3]:     iteration, num_floating_point_operations_so_far = train(
[rank3]:                                                       ^^^^^^
[rank3]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train
[rank3]:     ) = train_step(
[rank3]:         ^^^^^^^^^^^
[rank3]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step
[rank3]:     losses_reduced = forward_backward_func(
[rank3]:                      ^^^^^^^^^^^^^^^^^^^^^^
[rank3]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining
[rank3]:     output_tensor, num_tokens = forward_step(
[rank3]:                                 ^^^^^^^^^^^^^
[rank3]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step
[rank3]:     output_tensor, loss_func = forward_step_func(data_iterator, model)
[rank3]:                                ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
[rank3]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step
[rank3]:     (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)
[rank3]:                                                                             ^^^^^^^^^^^^^^^^^^^^^^^^
[rank3]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch
[rank3]:     batch = next(global_batches)
[rank3]:             ^^^^^^^^^^^^^^^^^^^^
[rank3]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches
[rank3]:     attention_mask = torch.ones(
[rank3]:                      ^^^^^^^^^^^
[rank3]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 137.54 GiB is free. Including non-PyTorch memory, this process has 2.27 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
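The failing allocation is the dense attention mask built by torch.ones in setup_batches (pretrain_gpt_profile.py:226). A single 512.00 GiB request cannot fit on a 139.81 GiB device no matter how the allocator is tuned, so the expandable_segments hint printed by PyTorch does not apply here; the mask, or the sequence-length / micro-batch configuration behind it, has to shrink (for example by not materializing a full seq x seq mask at all). The arithmetic below is only a hedged back-of-envelope check -- the actual tensor shape is not visible in this log -- assuming, purely for illustration, a boolean mask of shape (micro_batch, 1, seq_len, seq_len):

    # Hypothetical shapes, chosen because they reproduce the reported 512.00 GiB exactly.
    micro_batch = 8
    seq_len = 256 * 1024          # 262,144 tokens (assumed)
    bytes_per_elem = 1            # torch.bool

    mask_bytes = micro_batch * 1 * seq_len * seq_len * bytes_per_elem
    print(mask_bytes / 2**30)     # 512.0 GiB, matching "Tried to allocate 512.00 GiB"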
The remaining ranks in this excerpt -- 0, 1, 2, 4, 5, 6, 7, 9, 10, 11, 14, 15, 18, 19, 20, 21, 22, 27, 28, 29, 30 and 31, with ranks 23 and 25 cut off mid-traceback at this point in the log -- fail in exactly the same way, with the same call path through pretrain -> train -> train_step -> forward_backward_no_pipelining -> forward_step -> get_batch -> setup_batches (attention_mask = torch.ones(...)), and each reports the same error on its local GPU (0-7):
torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. The GPU has a total capacity of 139.81 GiB of which 137.52-137.55 GiB is free. Including non-PyTorch memory, this process has 2.25-2.29 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated.
If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank25]: batch = next(global_batches) [rank25]: ^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank25]: attention_mask = torch.ones( [rank25]: ^^^^^^^^^^^ [rank25]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 137.55 GiB is free. Including non-PyTorch memory, this process has 2.25 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank23]: batch = next(global_batches) [rank23]: ^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank23]: attention_mask = torch.ones( [rank23]: ^^^^^^^^^^^ [rank23]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 137.52 GiB is free. Including non-PyTorch memory, this process has 2.29 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank12]: Traceback (most recent call last): [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank12]: pretrain( [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank12]: iteration, num_floating_point_operations_so_far = train( [rank12]: ^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank12]: ) = train_step( [rank12]: ^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank12]: losses_reduced = forward_backward_func( [rank12]: ^^^^^^^^^^^^^^^^^^^^^^ [rank26]: Traceback (most recent call last): [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank26]: pretrain( [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank26]: iteration, num_floating_point_operations_so_far = train( [rank26]: ^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank26]: ) = train_step( [rank26]: ^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank26]: losses_reduced = forward_backward_func( [rank26]: ^^^^^^^^^^^^^^^^^^^^^^ [rank17]: Traceback (most recent call last): [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank17]: pretrain( [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank17]: iteration, num_floating_point_operations_so_far = train( [rank17]: ^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank17]: ) = train_step( [rank17]: ^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank17]: losses_reduced = forward_backward_func( [rank17]: ^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank12]: output_tensor, num_tokens = forward_step( [rank12]: ^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank12]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank12]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank26]: output_tensor, num_tokens = forward_step( [rank26]: ^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank26]: output_tensor, loss_func = 
forward_step_func(data_iterator, model) [rank26]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank26]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank26]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank17]: output_tensor, num_tokens = forward_step( [rank17]: ^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank17]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank17]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank17]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank17]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank12]: batch = next(global_batches) [rank12]: ^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank12]: attention_mask = torch.ones( [rank12]: ^^^^^^^^^^^ [rank12]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 137.52 GiB is free. Including non-PyTorch memory, this process has 2.29 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank26]: batch = next(global_batches) [rank26]: ^^^^^^^^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank26]: attention_mask = torch.ones( [rank26]: ^^^^^^^^^^^ [rank26]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 137.54 GiB is free. Including non-PyTorch memory, this process has 2.27 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank17]: batch = next(global_batches) [rank17]: ^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank17]: attention_mask = torch.ones( [rank17]: ^^^^^^^^^^^ [rank17]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 137.52 GiB is free. 
Including non-PyTorch memory, this process has 2.29 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank8]: Traceback (most recent call last): [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank8]: pretrain( [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank8]: iteration, num_floating_point_operations_so_far = train( [rank8]: ^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank8]: ) = train_step( [rank8]: ^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank8]: losses_reduced = forward_backward_func( [rank8]: ^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank24]: Traceback (most recent call last): [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank24]: pretrain( [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank24]: iteration, num_floating_point_operations_so_far = train( [rank24]: ^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank24]: ) = train_step( [rank24]: ^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank24]: losses_reduced = forward_backward_func( [rank24]: ^^^^^^^^^^^^^^^^^^^^^^ [rank16]: Traceback (most recent call last): [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank16]: pretrain( [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank16]: iteration, num_floating_point_operations_so_far = train( [rank16]: ^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank16]: ) = train_step( [rank16]: ^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank16]: losses_reduced = forward_backward_func( [rank16]: ^^^^^^^^^^^^^^^^^^^^^^ [rank8]: output_tensor, num_tokens = forward_step( [rank8]: ^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank8]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank8]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank8]: 
batch = next(global_batches) [rank8]: ^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank24]: output_tensor, num_tokens = forward_step( [rank24]: ^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank24]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank24]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank16]: output_tensor, num_tokens = forward_step( [rank16]: ^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank16]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank16]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank8]: attention_mask = torch.ones( [rank8]: ^^^^^^^^^^^ [rank8]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 137.52 GiB is free. Including non-PyTorch memory, this process has 2.29 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank24]: batch = next(global_batches) [rank24]: ^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank24]: attention_mask = torch.ones( [rank24]: ^^^^^^^^^^^ [rank24]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 137.54 GiB is free. Including non-PyTorch memory, this process has 2.27 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank16]: batch = next(global_batches) [rank16]: ^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank16]: attention_mask = torch.ones( [rank16]: ^^^^^^^^^^^ [rank16]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 137.53 GiB is free. Including non-PyTorch memory, this process has 2.27 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank13]: Traceback (most recent call last): [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank13]: pretrain( [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank13]: iteration, num_floating_point_operations_so_far = train( [rank13]: ^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank13]: ) = train_step( [rank13]: ^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank13]: losses_reduced = forward_backward_func( [rank13]: ^^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank13]: output_tensor, num_tokens = forward_step( [rank13]: ^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank13]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank13]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank13]: batch = next(global_batches) [rank13]: ^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank13]: attention_mask = torch.ones( [rank13]: ^^^^^^^^^^^ [rank13]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 137.53 GiB is free. Including non-PyTorch memory, this process has 2.27 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
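The OutOfMemoryError above is not a fragmentation issue: each GPU has roughly 137.5 GiB of its 139.81 GiB free, yet the request is 512.00 GiB, so the PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True hint would not help. A 512 GiB torch.ones allocation inside setup_batches is consistent with materializing a dense attention mask, whose size grows quadratically with sequence length. The sketch below is an illustration under that assumption; the shapes and dtype are not taken from pretrain_gpt_profile.py line 226.

# Illustration only: size of a dense [batch, 1, seq, seq] attention mask, and an
# attention call that never materializes it. Shapes and dtype are assumptions.
import torch

def dense_mask_gib(batch: int, seq: int, dtype: torch.dtype = torch.bool) -> float:
    bytes_per_element = torch.empty((), dtype=dtype).element_size()  # 1 for bool
    return batch * seq * seq * bytes_per_element / 2**30

# Example figures: a bool mask for batch 32 at seq 131072 is exactly 512 GiB,
# matching the size of the failed allocation in the log.
print(dense_mask_gib(32, 131072))  # 512.0

# Kernels such as scaled_dot_product_attention apply causal masking implicitly,
# so the seq x seq mask never has to exist as a tensor.
q = k = v = torch.randn(2, 8, 1024, 64)
out = torch.nn.functional.scaled_dot_product_attention(q, k, v, is_causal=True)
print(out.shape)  # torch.Size([2, 8, 1024, 64])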
[rank4]:[W621 21:11:17.282952772 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
The same ProcessGroupNCCL shutdown warning is emitted, each with its own timestamp, by ranks 1, 2, 3, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 17, 18, 19, 20, 21, 22, 23, 25, 26, 27, 28, 29, 30, and 31.
W0621 21:11:18.202000 127109 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 127181 closing signal SIGTERM
The elastic agents then send the same closing SIGTERM to the rest of their workers: agent 127109 (fs-mbz-gpu-852) to pids 127182-127184 and 127186-127188, agent 3347459 (fs-mbz-gpu-881) to pids 3347529-3347531, 3347533, 3347534, and 3347536, agent 3416130 (fs-mbz-gpu-870) to pids 3416200-3416205 and 3416207, and agent 2046747 (fs-mbz-gpu-901) to pids 2046817-2046819, 2046821, and 2046824.
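The destroy_process_group() warnings above are a side effect of the crash rather than a separate bug: the workers exited on the OutOfMemoryError before tearing down NCCL. The usual pattern for a clean exit is to pair init_process_group with an unconditional destroy, roughly as sketched here (run_training and main are placeholders, not names from pretrain_gpt_profile.py):

# Minimal sketch of the shutdown pattern the ProcessGroupNCCL warning asks for.
import torch.distributed as dist

def run_training() -> None:
    ...  # stand-in for the actual training loop

def main() -> None:
    dist.init_process_group(backend="nccl")
    try:
        run_training()
    finally:
        # Release NCCL resources even when training raises, so the
        # "destroy_process_group() was not called" warning is not emitted.
        dist.destroy_process_group()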
E0621 21:11:18.872000 127109 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 4 (pid: 127185) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
Traceback (most recent call last):
  File "<frozen runpy>", line 198, in _run_module_as_main
  File "<frozen runpy>", line 88, in _run_code
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in <module>
    main()
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
    return arg(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main
    launch(args)
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch
    run(args)
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run
    elastic_launch(
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__
    return launch_agent(self._config, self._entrypoint, list(args))
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent
    raise ChildFailedError(
torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
============================================================
./pretrain_gpt_profile.py FAILED
------------------------------------------------------------
Failures:
------------------------------------------------------------
Root Cause (first observed failure):
[0]:
  time      : 2025-06-21_21:11:18
  host      : fs-mbz-gpu-852
  rank      : 4 (local_rank: 4)
  exitcode  : 1 (pid: 127185)
  error_file:
  traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
============================================================
E0621 21:11:19.049000 3416130 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 6 (pid: 3416206) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
E0621 21:11:19.048000 2046747 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 3 (pid: 2046820) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
W0621 21:11:19.063000 2046747 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-901_2046747_0' has
failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. W0621 21:11:19.062000 3416130 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-870_3416130_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. [W621 21:11:19.503027977 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-901]:56628, remote=[fs-mbz-gpu-852]:29500): Broken pipe Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x1548e8d785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x1548d205aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa358 (0x1548d205c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5babb3e (0x1548d205db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: c10d::TCPStore::doWait(c10::ArrayRef, std::allocator > >, std::chrono::duration >) + 0x1a6 (0x1548d2057ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string, std::allocator > const&) + 0x33 (0x1548d2057ea3 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #6: c10d::TCPStore::get(std::__cxx11::basic_string, std::allocator > const&) + 0xab (0x1548d2058f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #7: + 0xc0f526 (0x1548e138b526 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #8: + 0x37f17d (0x1548e0afb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #26: + 0x29d90 (0x1548ea0f5d90 in /lib/x86_64-linux-gnu/libc.so.6) frame #27: __libc_start_main + 0x80 (0x1548ea0f5e40 in /lib/x86_64-linux-gnu/libc.so.6) [W621 21:11:19.981720622 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-870]:47980, remote=[fs-mbz-gpu-852]:29500): Broken pipe Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x146c111785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x146bfa45aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa358 (0x146bfa45c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5babb3e (0x146bfa45db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: c10d::TCPStore::doWait(c10::ArrayRef, std::allocator 
> >, std::chrono::duration >) + 0x1a6 (0x146bfa457ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string, std::allocator > const&) + 0x33 (0x146bfa457ea3 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #6: c10d::TCPStore::get(std::__cxx11::basic_string, std::allocator > const&) + 0xab (0x146bfa458f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #7: + 0xc0f526 (0x146c0978b526 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #8: + 0x37f17d (0x146c08efb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #26: + 0x29d90 (0x146c1246ad90 in /lib/x86_64-linux-gnu/libc.so.6) frame #27: __libc_start_main + 0x80 (0x146c1246ae40 in /lib/x86_64-linux-gnu/libc.so.6) W0621 21:11:19.074000 2046747 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-901_2046747_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. W0621 21:11:19.079000 3416130 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-870_3416130_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. [W621 21:11:19.513738216 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-901]:56628, remote=[fs-mbz-gpu-852]:29500): Broken pipe Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x1548e8d785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x1548d205aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa358 (0x1548d205c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5babb3e (0x1548d205db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: c10d::TCPStore::doWait(c10::ArrayRef, std::allocator > >, std::chrono::duration >) + 0x1a6 (0x1548d2057ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string, std::allocator > const&) + 0x33 (0x1548d2057ea3 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #6: c10d::TCPStore::get(std::__cxx11::basic_string, std::allocator > const&) + 0xab (0x1548d2058f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #7: + 0xc0f526 (0x1548e138b526 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #8: + 0x37f17d (0x1548e0afb17d in 
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #26: + 0x29d90 (0x1548ea0f5d90 in /lib/x86_64-linux-gnu/libc.so.6) frame #27: __libc_start_main + 0x80 (0x1548ea0f5e40 in /lib/x86_64-linux-gnu/libc.so.6) [W621 21:11:19.997087513 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-870]:47980, remote=[fs-mbz-gpu-852]:29500): Broken pipe Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x146c111785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x146bfa45aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa358 (0x146bfa45c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) W0621 21:11:19.084000 2046747 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-901_2046747_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. frame #3: + 0x5babb3e (0x146bfa45db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: c10d::TCPStore::doWait(c10::ArrayRef, std::allocator > >, std::chrono::duration >) + 0x1a6 (0x146bfa457ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string, std::allocator > const&) + 0x33 (0x146bfa457ea3 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) Traceback (most recent call last): File "", line 198, in _run_module_as_main frame #6: c10d::TCPStore::get(std::__cxx11::basic_string, std::allocator > const&) + 0xab (0x146bfa458f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #7: + 0xc0f526 (0x146c0978b526 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #8: + 0x37f17d (0x146c08efb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #26: + 0x29d90 (0x146c1246ad90 in /lib/x86_64-linux-gnu/libc.so.6) frame #27: __libc_start_main + 0x80 (0x146c1246ae40 in /lib/x86_64-linux-gnu/libc.so.6) File "", line 88, in _run_code File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in main() File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper return arg(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main launch(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch run(args) File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run W0621 21:11:19.089000 3416130 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-870_3416130_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. elastic_launch( File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__ Traceback (most recent call last): return launch_agent(self._config, self._entrypoint, list(args)) File "", line 198, in _run_module_as_main ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent File "", line 88, in _run_code File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in raise ChildFailedError( torch.distributed.elastic.multiprocessing.errors.ChildFailedError: ============================================================ ./pretrain_gpt_profile.py FAILED ------------------------------------------------------------ Failures: [1]: time : 2025-06-21_21:11:18 host : fs-mbz-gpu-901 rank : 29 (local_rank: 5) exitcode : 1 (pid: 2046822) error_file: traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html [2]: time : 2025-06-21_21:11:18 host : fs-mbz-gpu-901 rank : 30 (local_rank: 6) exitcode : 1 (pid: 2046823) error_file: traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html ------------------------------------------------------------ Root Cause (first observed failure): [0]: time : 2025-06-21_21:11:18 host : fs-mbz-gpu-901 rank : 27 (local_rank: 3) exitcode : 1 (pid: 2046820) error_file: main() traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html ============================================================ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper return arg(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main launch(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch run(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run elastic_launch( File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__ return launch_agent(self._config, self._entrypoint, list(args)) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent raise ChildFailedError( torch.distributed.elastic.multiprocessing.errors.ChildFailedError: ============================================================ ./pretrain_gpt_profile.py FAILED ------------------------------------------------------------ Failures: ------------------------------------------------------------ 
Root Cause (first observed failure): [0]: time : 2025-06-21_21:11:18 host : fs-mbz-gpu-870 rank : 14 (local_rank: 6) exitcode : 1 (pid: 3416206) error_file: traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html ============================================================ + set +x E0621 21:11:19.291000 3347459 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 3 (pid: 3347532) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 W0621 21:11:19.302000 3347459 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-881_3347459_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. [W621 21:11:19.674005710 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-881]:43948, remote=[fs-mbz-gpu-852]:29500): Broken pipe Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x147a33f785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x147a1d25aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa358 (0x147a1d25c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5babb3e (0x147a1d25db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: c10d::TCPStore::doWait(c10::ArrayRef, std::allocator > >, std::chrono::duration >) + 0x1a6 (0x147a1d257ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string, std::allocator > const&) + 0x33 (0x147a1d257ea3 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #6: c10d::TCPStore::get(std::__cxx11::basic_string, std::allocator > const&) + 0xab (0x147a1d258f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #7: + 0xc0f526 (0x147a2c58b526 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #8: + 0x37f17d (0x147a2bcfb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #26: + 0x29d90 (0x147a3527cd90 in /lib/x86_64-linux-gnu/libc.so.6) frame #27: __libc_start_main + 0x80 (0x147a3527ce40 in /lib/x86_64-linux-gnu/libc.so.6) W0621 21:11:19.314000 3347459 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-881_3347459_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. 
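The TCPStore "Broken pipe" stacks and the repeated RendezvousConnectionError warnings in this stretch are secondary failures, not independent bugs: every failing socket points at remote=[fs-mbz-gpu-852]:29500, which is the --rdzv_endpoint, and the agent hosting that store has already shut down after its own workers failed, so the surviving agents on fs-mbz-gpu-870, fs-mbz-gpu-881, and fs-mbz-gpu-901 can no longer reach it to close rendezvous '343200'. A small local illustration of the store involved (host, port, and key below are stand-ins, not the cluster's values):

# Illustration: the c10d rendezvous backend keeps its state in a TCPStore served
# from the --rdzv_endpoint host. If the serving process exits, every later store
# operation from the other agents fails with a connection error, which is what
# the "sendBytes failed ... Broken pipe" frames in this log show.
from datetime import timedelta
from torch.distributed import TCPStore

server = TCPStore("127.0.0.1", 29511, is_master=True, timeout=timedelta(seconds=10))
client = TCPStore("127.0.0.1", 29511, is_master=False, timeout=timedelta(seconds=10))
client.set("rdzv/state", b"closed")
print(server.get("rdzv/state"))  # b'closed'
# Killing the server process here would make client.set()/client.get() raise,
# analogous to the rendezvous shutdown failures above.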
[W621 21:11:19.685005954 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-881]:43948, remote=[fs-mbz-gpu-852]:29500): Broken pipe
[identical sendBytes / Broken pipe stack trace as above]
W0621 21:11:19.324000 3347459 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-881_3347459_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError.
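Every failure entry above reports an empty error_file and points at https://pytorch.org/docs/stable/elastic/errors.html for enabling tracebacks. A minimal hedged sketch of what that page describes, assuming an entrypoint shaped roughly like the one in ./pretrain_gpt_profile.py (the real script is not shown in this log): decorating the entrypoint with torch.elastic's record decorator writes each child's exception to the error file, so the ChildFailedError summary carries the actual traceback instead of the placeholder seen here.

    # Hedged sketch, not taken from the original script; `pretrain` stands in for
    # whatever ./pretrain_gpt_profile.py actually calls.
    from torch.distributed.elastic.multiprocessing.errors import record

    @record
    def main():
        pretrain()  # hypothetical call; the real arguments are not visible in this log

    if __name__ == "__main__":
        main()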
Traceback (most recent call last):
  File "", line 198, in _run_module_as_main
  File "", line 88, in _run_code
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in
    main()
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
    return arg(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main
    launch(args)
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch
    run(args)
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run
    elastic_launch(
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__
    return launch_agent(self._config, self._entrypoint, list(args))
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent
    raise ChildFailedError(
torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
============================================================
./pretrain_gpt_profile.py FAILED
------------------------------------------------------------
Failures:
[1]:
  time      : 2025-06-21_21:11:18
  host      : fs-mbz-gpu-881
  rank      : 22 (local_rank: 6)
  exitcode  : 1 (pid: 3347535)
  error_file:
  traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
------------------------------------------------------------
Root Cause (first observed failure):
[0]:
  time      : 2025-06-21_21:11:18
  host      : fs-mbz-gpu-881
  rank      : 19 (local_rank: 3)
  exitcode  : 1 (pid: 3347532)
  error_file:
  traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
============================================================
+ set +x
+ set +x
+ set +x
+ for ctx_length in 1024 2048 4096 8192 12288 16384 24576 32768 40960 49152 65536 81920 98304 131072
+ export PROF_CTX_LENGTH=8192
+ PROF_CTX_LENGTH=8192
+ name='/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L8192*tp8.cp4.bs32.json'
+ '[' -f '/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L8192*tp8.cp4.bs32.json' ']'
+ echo 'Running ctx_length=8192, TP_SIZE=8, CP_SIZE=4, BATCH_SIZE=32'
+ srun bash ./attnserver.sh
+ which python3
+ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 0 --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 8192 --max-position-embeddings 8192 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
+ which python3
+ which python3
+ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 2 --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py [same training arguments as the --node_rank 0 command above]
+ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 1 --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py [same training arguments as the --node_rank 0 command above]
+ which python3
+ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 3 --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py [same training arguments as the --node_rank 0 command above]
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated and will be removed in future. Use torchrun. Note that --use-env is set by default in torchrun. If your script expects `--local-rank` argument to be set, please change it to read from `os.environ['LOCAL_RANK']` instead. See https://pytorch.org/docs/stable/distributed.html#launch-utility for further instructions
  main()
[the same FutureWarning and main() line are printed by the launcher on each of the four nodes]
W0621 21:11:22.118000 128998 site-packages/torch/distributed/run.py:766]
W0621 21:11:22.118000 128998 site-packages/torch/distributed/run.py:766] *****************************************
W0621 21:11:22.118000 128998 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
W0621 21:11:22.118000 128998 site-packages/torch/distributed/run.py:766] *****************************************
[the same OMP_NUM_THREADS warning is printed by the launcher agents with PIDs 2048584, 3349295, and 3417967]
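The FutureWarning above appears once per node because the job is still launched through python3 -m torch.distributed.launch; 4 nodes × 8 processes per node gives the 32 ranks seen below, matching tensor-model-parallel-size 8 × context-parallel-size 4. The warning's own migration advice is to switch to torchrun and read the local rank from the environment rather than a --local-rank argument. A minimal hedged sketch of that change, assuming the script currently parses --local-rank (the actual argument handling in pretrain_gpt_profile.py is not visible in this log):

    # Hedged sketch of the torchrun-style local-rank handling the FutureWarning asks for.
    # The launch command would become e.g. torchrun --nproc_per_node 8 --nnodes 4 ... ./pretrain_gpt_profile.py ...
    import os
    import torch

    local_rank = int(os.environ["LOCAL_RANK"])  # set by torchrun for every worker process
    torch.cuda.set_device(local_rank)           # pin this process to its GPU before any CUDA work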
[rank0]:[W621 21:11:46.223980470 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 0] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can specify device_id in init_process_group() to force use of a particular device.
[the same ProcessGroupNCCL warning is printed by all 32 ranks (0-31) between 21:11:46 and 21:11:47, each naming its own GPU index; on every node, local_rank k reports GPU k]
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
  warnings.warn(
[this UserWarning is repeated verbatim by the other ranks]
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
  warnings.warn(
[this DeprecationWarning is repeated verbatim by the other ranks]
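Each rank prints the ProcessGroupNCCL warning above because init_process_group() is called before the rank's CUDA device has been communicated to the process group, so NCCL has to infer the device and warns that a wrong rank-to-GPU mapping could hang. The warning's own suggestion is to pass device_id to init_process_group(); a minimal hedged sketch under the assumption that the local rank comes from the launcher environment (Megatron's actual initialization path is not shown in this log):

    # Hedged sketch addressing the "device used by this process is currently unknown" warning.
    import os
    import torch
    import torch.distributed as dist

    local_rank = int(os.environ["LOCAL_RANK"])
    device = torch.device(f"cuda:{local_rank}")
    torch.cuda.set_device(device)
    # Passing device_id (supported in recent PyTorch releases) binds this rank to its GPU up front.
    dist.init_process_group(backend="nccl", device_id=device)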
[rank4]: Traceback (most recent call last):
[rank4]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in
[rank4]:     pretrain(
[rank4]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain
[rank4]:     iteration, num_floating_point_operations_so_far = train(
[rank4]:                                                       ^^^^^^
[rank4]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train
[rank4]:     ) = train_step(
[rank4]:         ^^^^^^^^^^^
[rank4]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step
[rank4]:     losses_reduced = forward_backward_func(
[rank4]:                      ^^^^^^^^^^^^^^^^^^^^^^
[rank4]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining
[rank4]:     output_tensor, num_tokens = forward_step(
[rank4]:                                 ^^^^^^^^^^^^^
[rank4]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step
[rank4]:     output_tensor, loss_func = forward_step_func(data_iterator, model)
[rank4]:                                ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
[rank4]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step
[rank4]:     (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)
[rank4]:                                                                             ^^^^^^^^^^^^^^^^^^^^^^^^
[rank4]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch
[rank4]:     batch = next(global_batches)
[rank4]:             ^^^^^^^^^^^^^^^^^^^^
[rank4]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches
[rank4]:     attention_mask = torch.ones(
[rank4]:                      ^^^^^^^^^^^
[rank4]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 2048.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 137.32 GiB is free. Including non-PyTorch memory, this process has 2.49 GiB memory in use. Of the allocated memory 952.49 MiB is allocated by PyTorch, and 59.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
[ranks 2, 3, 0, 27, 9, 5, 30, and 26 raise the identical torch.OutOfMemoryError from the same call site (setup_batches -> attention_mask = torch.ones(...)), each trying to allocate 2048.00 GiB on its own 139.81 GiB GPU (about 137.3 GiB free, roughly 2.5 GiB in use, 952.49 MiB allocated by PyTorch); the interleaved tracebacks for ranks 13 and 7 begin the same way]
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank13]: output_tensor, num_tokens = forward_step( [rank13]: ^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank13]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank13]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank22]: Traceback (most recent call last): [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank22]: pretrain( [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank22]: iteration, num_floating_point_operations_so_far = train( [rank22]: ^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank22]: ) = train_step( [rank22]: ^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank22]: losses_reduced = forward_backward_func( [rank22]: ^^^^^^^^^^^^^^^^^^^^^^ [rank7]: output_tensor, num_tokens = forward_step( [rank7]: ^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank7]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank7]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank7]: batch = next(global_batches) [rank7]: ^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank13]: batch = next(global_batches) [rank13]: ^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank13]: attention_mask = torch.ones( [rank13]: ^^^^^^^^^^^ [rank13]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 2048.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 137.30 GiB is free. Including non-PyTorch memory, this process has 2.51 GiB memory in use. Of the allocated memory 952.49 MiB is allocated by PyTorch, and 59.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank22]: output_tensor, num_tokens = forward_step( [rank22]: ^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank22]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank22]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank22]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank22]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank7]: attention_mask = torch.ones( [rank7]: ^^^^^^^^^^^ [rank7]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 2048.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 137.30 GiB is free. Including non-PyTorch memory, this process has 2.50 GiB memory in use. Of the allocated memory 952.49 MiB is allocated by PyTorch, and 59.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank22]: batch = next(global_batches) [rank22]: ^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank22]: attention_mask = torch.ones( [rank22]: ^^^^^^^^^^^ [rank22]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 2048.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 137.30 GiB is free. Including non-PyTorch memory, this process has 2.51 GiB memory in use. Of the allocated memory 952.49 MiB is allocated by PyTorch, and 59.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank1]: Traceback (most recent call last): [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank1]: pretrain( [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank1]: iteration, num_floating_point_operations_so_far = train( [rank1]: ^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank1]: ) = train_step( [rank1]: ^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank1]: losses_reduced = forward_backward_func( [rank1]: ^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank23]: Traceback (most recent call last): [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank23]: pretrain( [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank23]: iteration, num_floating_point_operations_so_far = train( [rank23]: ^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank23]: ) = train_step( [rank23]: ^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank23]: losses_reduced = forward_backward_func( [rank23]: ^^^^^^^^^^^^^^^^^^^^^^ [rank1]: output_tensor, num_tokens = forward_step( [rank1]: ^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank1]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank1]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank1]: batch = next(global_batches) [rank1]: ^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank23]: output_tensor, num_tokens = forward_step( [rank23]: ^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank23]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank23]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank1]: attention_mask = torch.ones( [rank1]: ^^^^^^^^^^^ [rank1]: torch.OutOfMemoryError: CUDA out of memory. 
Tried to allocate 2048.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 137.30 GiB is free. Including non-PyTorch memory, this process has 2.50 GiB memory in use. Of the allocated memory 952.49 MiB is allocated by PyTorch, and 59.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank23]: batch = next(global_batches) [rank23]: ^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank23]: attention_mask = torch.ones( [rank23]: ^^^^^^^^^^^ [rank23]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 2048.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 137.28 GiB is free. Including non-PyTorch memory, this process has 2.52 GiB memory in use. Of the allocated memory 952.49 MiB is allocated by PyTorch, and 59.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank15]: Traceback (most recent call last): [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank15]: pretrain( [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank15]: iteration, num_floating_point_operations_so_far = train( [rank15]: ^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank15]: ) = train_step( [rank15]: ^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank15]: losses_reduced = forward_backward_func( [rank15]: ^^^^^^^^^^^^^^^^^^^^^^ [rank6]: Traceback (most recent call last): [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank6]: pretrain( [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank6]: iteration, num_floating_point_operations_so_far = train( [rank6]: ^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank6]: ) = train_step( [rank6]: ^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank6]: losses_reduced = forward_backward_func( [rank6]: ^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank15]: output_tensor, num_tokens = forward_step( [rank15]: ^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank15]: output_tensor, loss_func = 
forward_step_func(data_iterator, model) [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank15]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: output_tensor, num_tokens = forward_step( [rank6]: ^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank6]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank6]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank6]: batch = next(global_batches) [rank6]: ^^^^^^^^^^^^^^^^^^^^ [rank31]: Traceback (most recent call last): [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank31]: pretrain( [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank31]: iteration, num_floating_point_operations_so_far = train( [rank31]: ^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank31]: ) = train_step( [rank31]: ^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank31]: losses_reduced = forward_backward_func( [rank31]: ^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank15]: batch = next(global_batches) [rank15]: ^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank15]: attention_mask = torch.ones( [rank15]: ^^^^^^^^^^^ [rank15]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 2048.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 137.30 GiB is free. Including non-PyTorch memory, this process has 2.51 GiB memory in use. Of the allocated memory 952.49 MiB is allocated by PyTorch, and 59.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank6]: attention_mask = torch.ones( [rank6]: ^^^^^^^^^^^ [rank6]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 2048.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 137.32 GiB is free. Including non-PyTorch memory, this process has 2.49 GiB memory in use. Of the allocated memory 952.49 MiB is allocated by PyTorch, and 59.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank31]: output_tensor, num_tokens = forward_step( [rank31]: ^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank31]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank31]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank31]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank31]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: Traceback (most recent call last): [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank11]: pretrain( [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank11]: iteration, num_floating_point_operations_so_far = train( [rank11]: ^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank11]: ) = train_step( [rank11]: ^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank11]: losses_reduced = forward_backward_func( [rank11]: ^^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank31]: batch = next(global_batches) [rank31]: ^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank31]: attention_mask = torch.ones( [rank31]: ^^^^^^^^^^^ [rank31]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 2048.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 137.32 GiB is free. Including non-PyTorch memory, this process has 2.49 GiB memory in use. Of the allocated memory 952.49 MiB is allocated by PyTorch, and 59.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank11]: output_tensor, num_tokens = forward_step( [rank11]: ^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank11]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank11]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank17]: Traceback (most recent call last): [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank17]: pretrain( [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank17]: iteration, num_floating_point_operations_so_far = train( [rank17]: ^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank17]: ) = train_step( [rank17]: ^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank17]: losses_reduced = forward_backward_func( [rank17]: ^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank11]: batch = next(global_batches) [rank11]: ^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank11]: attention_mask = torch.ones( [rank11]: ^^^^^^^^^^^ [rank11]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 2048.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 137.30 GiB is free. Including non-PyTorch memory, this process has 2.51 GiB memory in use. Of the allocated memory 952.49 MiB is allocated by PyTorch, and 59.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank17]: output_tensor, num_tokens = forward_step( [rank17]: ^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank17]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank17]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank17]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank17]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank17]: batch = next(global_batches) [rank17]: ^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank17]: attention_mask = torch.ones( [rank17]: ^^^^^^^^^^^ [rank17]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 2048.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 137.28 GiB is free. Including non-PyTorch memory, this process has 2.52 GiB memory in use. Of the allocated memory 952.49 MiB is allocated by PyTorch, and 59.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank18]: Traceback (most recent call last): [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank18]: pretrain( [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank18]: iteration, num_floating_point_operations_so_far = train( [rank18]: ^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank18]: ) = train_step( [rank18]: ^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank18]: losses_reduced = forward_backward_func( [rank18]: ^^^^^^^^^^^^^^^^^^^^^^ [rank29]: Traceback (most recent call last): [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank29]: pretrain( [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank29]: iteration, num_floating_point_operations_so_far = train( [rank29]: ^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank29]: ) = train_step( [rank29]: ^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank29]: losses_reduced = forward_backward_func( [rank29]: ^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank18]: output_tensor, num_tokens = forward_step( [rank18]: 
^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank18]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank18]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank29]: output_tensor, num_tokens = forward_step( [rank29]: ^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank29]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank29]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank14]: Traceback (most recent call last): [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank14]: pretrain( [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank14]: iteration, num_floating_point_operations_so_far = train( [rank14]: ^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank14]: ) = train_step( [rank14]: ^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank14]: losses_reduced = forward_backward_func( [rank14]: ^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank18]: batch = next(global_batches) [rank18]: ^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank18]: attention_mask = torch.ones( [rank18]: ^^^^^^^^^^^ [rank18]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 2048.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 137.30 GiB is free. Including non-PyTorch memory, this process has 2.51 GiB memory in use. Of the allocated memory 952.49 MiB is allocated by PyTorch, and 59.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank29]: batch = next(global_batches) [rank29]: ^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank29]: attention_mask = torch.ones( [rank29]: ^^^^^^^^^^^ [rank29]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 2048.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 137.32 GiB is free. 
Including non-PyTorch memory, this process has 2.49 GiB memory in use. Of the allocated memory 952.49 MiB is allocated by PyTorch, and 59.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank14]: output_tensor, num_tokens = forward_step( [rank14]: ^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank14]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank14]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: Traceback (most recent call last): [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank21]: pretrain( [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank21]: iteration, num_floating_point_operations_so_far = train( [rank21]: ^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank21]: ) = train_step( [rank21]: ^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank21]: losses_reduced = forward_backward_func( [rank21]: ^^^^^^^^^^^^^^^^^^^^^^ [rank25]: Traceback (most recent call last): [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank25]: pretrain( [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank25]: iteration, num_floating_point_operations_so_far = train( [rank25]: ^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank25]: ) = train_step( [rank25]: ^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank25]: losses_reduced = forward_backward_func( [rank25]: ^^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank14]: batch = next(global_batches) [rank14]: ^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank14]: attention_mask = torch.ones( [rank14]: ^^^^^^^^^^^ [rank14]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 2048.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 137.28 GiB is free. Including non-PyTorch memory, this process has 2.52 GiB memory in use. Of the allocated memory 952.49 MiB is allocated by PyTorch, and 59.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank21]: output_tensor, num_tokens = forward_step( [rank21]: ^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank21]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank21]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank25]: output_tensor, num_tokens = forward_step( [rank25]: ^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank25]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank25]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank10]: Traceback (most recent call last): [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank10]: pretrain( [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank10]: iteration, num_floating_point_operations_so_far = train( [rank10]: ^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank10]: ) = train_step( [rank10]: ^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank10]: losses_reduced = forward_backward_func( [rank10]: ^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank21]: batch = next(global_batches) [rank21]: ^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank21]: attention_mask = torch.ones( [rank21]: ^^^^^^^^^^^ [rank21]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 2048.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 137.28 GiB is free. Including non-PyTorch memory, this process has 2.52 GiB memory in use. Of the allocated memory 952.49 MiB is allocated by PyTorch, and 59.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank25]: batch = next(global_batches) [rank25]: ^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank25]: attention_mask = torch.ones( [rank25]: ^^^^^^^^^^^ [rank25]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 2048.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 137.32 GiB is free. Including non-PyTorch memory, this process has 2.49 GiB memory in use. Of the allocated memory 952.49 MiB is allocated by PyTorch, and 59.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank10]: output_tensor, num_tokens = forward_step( [rank10]: ^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank10]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank10]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: Traceback (most recent call last): [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank28]: pretrain( [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank28]: iteration, num_floating_point_operations_so_far = train( [rank28]: ^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank28]: ) = train_step( [rank28]: ^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank28]: losses_reduced = forward_backward_func( [rank28]: ^^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank10]: batch = next(global_batches) [rank10]: ^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank10]: attention_mask = torch.ones( [rank10]: ^^^^^^^^^^^ [rank10]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 2048.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 137.28 GiB is free. Including non-PyTorch memory, this process has 2.52 GiB memory in use. Of the allocated memory 952.49 MiB is allocated by PyTorch, and 59.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank28]: output_tensor, num_tokens = forward_step( [rank28]: ^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank28]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank28]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: Traceback (most recent call last): [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank8]: pretrain( [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank8]: iteration, num_floating_point_operations_so_far = train( [rank8]: ^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank8]: ) = train_step( [rank8]: ^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank8]: losses_reduced = forward_backward_func( [rank8]: ^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank28]: batch = next(global_batches) [rank28]: ^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank28]: attention_mask = torch.ones( [rank28]: ^^^^^^^^^^^ [rank28]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 2048.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 137.30 GiB is free. Including non-PyTorch memory, this process has 2.50 GiB memory in use. Of the allocated memory 952.49 MiB is allocated by PyTorch, and 59.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank8]: output_tensor, num_tokens = forward_step( [rank8]: ^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank8]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank8]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank8]: batch = next(global_batches) [rank8]: ^^^^^^^^^^^^^^^^^^^^ [rank16]: Traceback (most recent call last): [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank16]: pretrain( [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank16]: iteration, num_floating_point_operations_so_far = train( [rank16]: ^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank16]: ) = train_step( [rank16]: ^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank16]: losses_reduced = forward_backward_func( [rank16]: ^^^^^^^^^^^^^^^^^^^^^^ [rank24]: Traceback (most recent call last): [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank24]: pretrain( [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank24]: iteration, num_floating_point_operations_so_far = train( [rank24]: ^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank24]: ) = train_step( [rank24]: ^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank24]: losses_reduced = forward_backward_func( [rank24]: ^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank8]: attention_mask = torch.ones( [rank8]: ^^^^^^^^^^^ [rank8]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 2048.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 137.28 GiB is free. Including non-PyTorch memory, this process has 2.52 GiB memory in use. Of the allocated memory 952.49 MiB is allocated by PyTorch, and 59.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank16]: output_tensor, num_tokens = forward_step( [rank16]: ^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank16]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank16]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank24]: output_tensor, num_tokens = forward_step( [rank24]: ^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank24]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank24]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank12]: Traceback (most recent call last): [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank12]: pretrain( [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank12]: iteration, num_floating_point_operations_so_far = train( [rank12]: ^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank12]: ) = train_step( [rank12]: ^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank12]: losses_reduced = forward_backward_func( [rank12]: ^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank16]: batch = next(global_batches) [rank16]: ^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank16]: attention_mask = torch.ones( [rank16]: ^^^^^^^^^^^ [rank16]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 2048.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 137.30 GiB is free. Including non-PyTorch memory, this process has 2.51 GiB memory in use. Of the allocated memory 952.49 MiB is allocated by PyTorch, and 59.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank24]: batch = next(global_batches) [rank24]: ^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank24]: attention_mask = torch.ones( [rank24]: ^^^^^^^^^^^ [rank24]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 2048.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 137.30 GiB is free. Including non-PyTorch memory, this process has 2.50 GiB memory in use. Of the allocated memory 952.49 MiB is allocated by PyTorch, and 59.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank12]: output_tensor, num_tokens = forward_step( [rank12]: ^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank12]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank12]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank20]: Traceback (most recent call last): [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank20]: pretrain( [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank20]: iteration, num_floating_point_operations_so_far = train( [rank20]: ^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank20]: ) = train_step( [rank20]: ^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank20]: losses_reduced = forward_backward_func( [rank20]: ^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank12]: batch = next(global_batches) [rank12]: ^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank12]: attention_mask = torch.ones( [rank12]: ^^^^^^^^^^^ [rank12]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 2048.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 137.28 GiB is free. Including non-PyTorch memory, this process has 2.52 GiB memory in use. Of the allocated memory 952.49 MiB is allocated by PyTorch, and 59.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank20]: output_tensor, num_tokens = forward_step( [rank20]: ^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank20]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank20]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank20]: batch = next(global_batches) [rank20]: ^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank20]: attention_mask = torch.ones( [rank20]: ^^^^^^^^^^^ [rank20]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 2048.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 137.30 GiB is free. Including non-PyTorch memory, this process has 2.51 GiB memory in use. Of the allocated memory 952.49 MiB is allocated by PyTorch, and 59.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank19]: Traceback (most recent call last): [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank19]: pretrain( [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank19]: iteration, num_floating_point_operations_so_far = train( [rank19]: ^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank19]: ) = train_step( [rank19]: ^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank19]: losses_reduced = forward_backward_func( [rank19]: ^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank19]: output_tensor, num_tokens = forward_step( [rank19]: ^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank19]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank19]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank19]: batch = next(global_batches) [rank19]: ^^^^^^^^^^^^^^^^^^^^ [rank19]: File 
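The 2048.00 GiB request is what a dense [micro_batch, 1, seq_len, seq_len] mask built with torch.ones costs at very long sequence lengths. The actual micro-batch size, sequence length, and dtype are not visible in this portion of the log, so the sketch below only illustrates the arithmetic under assumed values; and since the request exceeds the entire 139.81 GiB device, the allocator hint in the message (PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True) cannot help here -- the mask has to be made smaller, cheaper, or not materialized at all.

# Back-of-the-envelope check of the 2048.00 GiB figure above.
# micro_batch, seq_len and bytes_per_elem are ASSUMED for illustration;
# they are not taken from pretrain_gpt_profile.py or this log.
def dense_mask_bytes(micro_batch: int, seq_len: int, bytes_per_elem: int) -> int:
    """Memory for a dense [micro_batch, 1, seq_len, seq_len] attention mask."""
    return micro_batch * seq_len * seq_len * bytes_per_elem

GiB = 1024 ** 3

# A single micro-batch in float32 (torch.ones' default dtype) reaches ~2048 GiB
# at a sequence length of roughly 740k tokens:
print(dense_mask_bytes(1, 741_455, 4) / GiB)   # ~2048.0

# Even a bool mask (1 byte per element) at the same length needs ~512 GiB,
# still several times the 139.81 GiB reported per GPU.
print(dense_mask_bytes(1, 741_455, 1) / GiB)   # ~512.0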
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank19]: attention_mask = torch.ones( [rank19]: ^^^^^^^^^^^ [rank19]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 2048.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 137.28 GiB is free. Including non-PyTorch memory, this process has 2.52 GiB memory in use. Of the allocated memory 952.49 MiB is allocated by PyTorch, and 59.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank2]:[W621 21:11:56.861142220 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank6]:[W621 21:11:56.869984478 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank13]:[W621 21:11:56.828359186 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank17]:[W621 21:11:56.324764816 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank5]:[W621 21:11:56.007977750 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank3]:[W621 21:11:57.043233641 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank18]:[W621 21:11:57.393322564 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank28]:[W621 21:11:57.476492426 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank4]:[W621 21:11:57.110300149 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank7]:[W621 21:11:57.142374288 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. 
For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank31]:[W621 21:11:57.564637250 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank15]:[W621 21:11:57.043372955 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank27]:[W621 21:11:57.581247386 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank29]:[W621 21:11:57.582313262 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank22]:[W621 21:11:57.525358711 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank23]:[W621 21:11:57.537106332 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank25]:[W621 21:11:57.625075528 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank21]:[W621 21:11:57.564581015 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank26]:[W621 21:11:57.633627520 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank20]:[W621 21:11:57.569608105 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank30]:[W621 21:11:57.654487848 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank1]:[W621 21:11:57.259238548 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank19]:[W621 21:11:57.603636447 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. 
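The destroy_process_group() warnings repeated by every rank above are a side effect of the crash rather than its cause, but the clean-shutdown pattern they ask for is short. A minimal sketch of the intended structure, assuming a torchrun-style entry point rather than the actual pretrain_gpt_profile.py flow:

import os
import torch
import torch.distributed as dist

def main() -> None:
    local_rank = int(os.environ.get("LOCAL_RANK", "0"))
    torch.cuda.set_device(local_rank)
    dist.init_process_group(backend="nccl")
    try:
        pass  # training loop goes here
    finally:
        # Release NCCL resources explicitly before interpreter exit; this is
        # exactly what the warning is complaining about.
        dist.destroy_process_group()

if __name__ == "__main__":
    main()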
For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank14]:[W621 21:11:57.173488567 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank10]:[W621 21:11:57.233493927 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank11]:[W621 21:11:57.241454767 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank12]:[W621 21:11:57.267764899 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank9]:[W621 21:11:57.305731698 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) W0621 21:11:57.797000 128998 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 129071 closing signal SIGTERM W0621 21:11:57.800000 128998 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 129073 closing signal SIGTERM W0621 21:11:57.800000 128998 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 129075 closing signal SIGTERM W0621 21:11:57.800000 128998 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 129076 closing signal SIGTERM W0621 21:11:57.801000 128998 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 129077 closing signal SIGTERM W0621 21:11:57.801000 128998 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 129078 closing signal SIGTERM W0621 21:11:57.801000 128998 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 129079 closing signal SIGTERM W0621 21:11:57.909000 3349295 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3349367 closing signal SIGTERM W0621 21:11:57.913000 3349295 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3349369 closing signal SIGTERM W0621 21:11:57.914000 3349295 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3349370 closing signal SIGTERM W0621 21:11:57.914000 3349295 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3349371 closing signal SIGTERM W0621 21:11:57.914000 3349295 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3349372 closing signal SIGTERM W0621 21:11:57.915000 3349295 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3349373 closing signal SIGTERM W0621 21:11:57.915000 3349295 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3349374 closing signal SIGTERM W0621 21:11:58.008000 2048584 
site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2048654 closing signal SIGTERM W0621 21:11:58.011000 2048584 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2048655 closing signal SIGTERM W0621 21:11:58.012000 2048584 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2048656 closing signal SIGTERM W0621 21:11:58.013000 2048584 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2048657 closing signal SIGTERM W0621 21:11:58.007000 3417967 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3418036 closing signal SIGTERM W0621 21:11:58.010000 3417967 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3418037 closing signal SIGTERM W0621 21:11:58.014000 2048584 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2048660 closing signal SIGTERM W0621 21:11:58.011000 3417967 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3418038 closing signal SIGTERM W0621 21:11:58.012000 3417967 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3418039 closing signal SIGTERM W0621 21:11:58.014000 2048584 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2048661 closing signal SIGTERM W0621 21:11:58.012000 3417967 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3418040 closing signal SIGTERM W0621 21:11:58.012000 3417967 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3418042 closing signal SIGTERM W0621 21:11:58.013000 3417967 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3418043 closing signal SIGTERM E0621 21:11:58.567000 128998 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 2 (pid: 129074) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 Traceback (most recent call last): File "", line 198, in _run_module_as_main File "", line 88, in _run_code File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in main() File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper return arg(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main launch(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch run(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run elastic_launch( File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__ return launch_agent(self._config, self._entrypoint, list(args)) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent raise ChildFailedError( torch.distributed.elastic.multiprocessing.errors.ChildFailedError: 
============================================================ ./pretrain_gpt_profile.py FAILED ------------------------------------------------------------ Failures: ------------------------------------------------------------ Root Cause (first observed failure): [0]: time : 2025-06-21_21:11:57 host : fs-mbz-gpu-852 rank : 2 (local_rank: 2) exitcode : 1 (pid: 129074) error_file: traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html ============================================================ E0621 21:11:58.779000 2048584 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 4 (pid: 2048658) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 [W621 21:11:58.229882453 TCPStore.cpp:115] [c10d] recvVector failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-901]:42490, remote=[fs-mbz-gpu-852]:29500): failed to recv, got 0 bytes Exception raised from recvBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:678 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x145d787785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x145d6165aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa0d0 (0x145d6165c0d0 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5baa81d (0x145d6165c81d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: + 0x5bab4a9 (0x145d6165d4a9 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: c10d::TCPStore::compareSet(std::__cxx11::basic_string, std::allocator > const&, std::vector > const&, std::vector > const&) + 0x1fb (0x145d616574cb in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #6: + 0xc0f919 (0x145d7098b919 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #7: + 0x37f17d (0x145d700fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #25: + 0x29d90 (0x145d7973ed90 in /lib/x86_64-linux-gnu/libc.so.6) frame #26: __libc_start_main + 0x80 (0x145d7973ee40 in /lib/x86_64-linux-gnu/libc.so.6) W0621 21:11:58.802000 2048584 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-901_2048584_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. 
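The failure summary above reports an empty error_file and points at the torchelastic error-handling docs; decorating the worker's entry point with errors.record is what makes the child's traceback land in that file instead of only on stderr. A short sketch of the pattern, using a stand-in main():

from torch.distributed.elastic.multiprocessing.errors import record

@record
def main() -> None:
    # On an uncaught exception, @record writes the traceback to the error file
    # that torchrun / torch.distributed.launch then includes in its failure summary.
    raise RuntimeError("demo failure")

if __name__ == "__main__":
    main()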
[W621 21:11:58.242230948 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-901]:42490, remote=[fs-mbz-gpu-852]:29500): Broken pipe Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x145d787785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x145d6165aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa358 (0x145d6165c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5babb3e (0x145d6165db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: c10d::TCPStore::compareSet(std::__cxx11::basic_string, std::allocator > const&, std::vector > const&, std::vector > const&) + 0x299 (0x145d61657569 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: + 0xc0f919 (0x145d7098b919 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #6: + 0x37f17d (0x145d700fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #24: + 0x29d90 (0x145d7973ed90 in /lib/x86_64-linux-gnu/libc.so.6) frame #25: __libc_start_main + 0x80 (0x145d7973ee40 in /lib/x86_64-linux-gnu/libc.so.6) W0621 21:11:58.812000 2048584 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-901_2048584_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. 
[W621 21:11:58.251070047 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-901]:42490, remote=[fs-mbz-gpu-852]:29500): Broken pipe Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x145d787785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x145d6165aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa358 (0x145d6165c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5babb3e (0x145d6165db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: c10d::TCPStore::compareSet(std::__cxx11::basic_string, std::allocator > const&, std::vector > const&, std::vector > const&) + 0x299 (0x145d61657569 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: + 0xc0f919 (0x145d7098b919 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #6: + 0x37f17d (0x145d700fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #24: + 0x29d90 (0x145d7973ed90 in /lib/x86_64-linux-gnu/libc.so.6) frame #25: __libc_start_main + 0x80 (0x145d7973ee40 in /lib/x86_64-linux-gnu/libc.so.6) W0621 21:11:58.821000 2048584 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-901_2048584_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. 
Traceback (most recent call last): File "", line 198, in _run_module_as_main File "", line 88, in _run_code File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in main() File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper return arg(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main launch(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch run(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run elastic_launch( File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__ return launch_agent(self._config, self._entrypoint, list(args)) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent raise ChildFailedError( torch.distributed.elastic.multiprocessing.errors.ChildFailedError: ============================================================ ./pretrain_gpt_profile.py FAILED ------------------------------------------------------------ Failures: [1]: time : 2025-06-21_21:11:58 host : fs-mbz-gpu-901 rank : 29 (local_rank: 5) exitcode : 1 (pid: 2048659) error_file: traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html ------------------------------------------------------------ Root Cause (first observed failure): [0]: time : 2025-06-21_21:11:58 host : fs-mbz-gpu-901 rank : 28 (local_rank: 4) exitcode : 1 (pid: 2048658) error_file: traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html ============================================================ + set +x E0621 21:11:58.878000 3417967 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 5 (pid: 3418041) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 E0621 21:11:58.881000 3349295 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 1 (pid: 3349368) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 W0621 21:11:58.889000 3417967 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-870_3417967_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. 
[W621 21:11:58.807716691 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-870]:53694, remote=[fs-mbz-gpu-852]:29500): Broken pipe Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x152af5b785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x152adee5aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa358 (0x152adee5c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5babb3e (0x152adee5db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: c10d::TCPStore::doWait(c10::ArrayRef, std::allocator > >, std::chrono::duration >) + 0x1a6 (0x152adee57ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string, std::allocator > const&) + 0x33 (0x152adee57ea3 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #6: c10d::TCPStore::get(std::__cxx11::basic_string, std::allocator > const&) + 0xab (0x152adee58f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #7: + 0xc0f526 (0x152aee18b526 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #8: + 0x37f17d (0x152aed8fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #26: + 0x29d90 (0x152af6ef2d90 in /lib/x86_64-linux-gnu/libc.so.6) frame #27: __libc_start_main + 0x80 (0x152af6ef2e40 in /lib/x86_64-linux-gnu/libc.so.6) [W621 21:11:58.262617539 TCPStore.cpp:115] [c10d] recvVector failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-881]:44374, remote=[fs-mbz-gpu-852]:29500): failed to recv, got 0 bytes Exception raised from recvBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:678 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x14ee267785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x14ee0fa5aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa0d0 (0x14ee0fa5c0d0 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5baa81d (0x14ee0fa5c81d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: + 0x5bab4a9 (0x14ee0fa5d4a9 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: c10d::TCPStore::compareSet(std::__cxx11::basic_string, std::allocator > const&, std::vector > const&, std::vector > const&) + 0x1fb (0x14ee0fa574cb in 
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #6: + 0xc0f919 (0x14ee1ed8b919 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #7: + 0x37f17d (0x14ee1e4fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #25: + 0x29d90 (0x14ee27a22d90 in /lib/x86_64-linux-gnu/libc.so.6) frame #26: __libc_start_main + 0x80 (0x14ee27a22e40 in /lib/x86_64-linux-gnu/libc.so.6) W0621 21:11:58.901000 3417967 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-870_3417967_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. W0621 21:11:58.903000 3349295 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-881_3349295_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. [W621 21:11:58.818608891 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-870]:53694, remote=[fs-mbz-gpu-852]:29500): Broken pipe Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x152af5b785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x152adee5aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa358 (0x152adee5c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5babb3e (0x152adee5db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: c10d::TCPStore::doWait(c10::ArrayRef, std::allocator > >, std::chrono::duration >) + 0x1a6 (0x152adee57ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string, std::allocator > const&) + 0x33 (0x152adee57ea3 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #6: c10d::TCPStore::get(std::__cxx11::basic_string, std::allocator > const&) + 0xab (0x152adee58f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #7: + 0xc0f526 (0x152aee18b526 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #8: + 0x37f17d (0x152aed8fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #26: + 0x29d90 (0x152af6ef2d90 in /lib/x86_64-linux-gnu/libc.so.6) frame #27: __libc_start_main + 0x80 (0x152af6ef2e40 in /lib/x86_64-linux-gnu/libc.so.6) [W621 21:11:58.274651580 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-881]:44374, remote=[fs-mbz-gpu-852]:29500): Broken pipe Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first): frame #0: 
c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x14ee267785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x14ee0fa5aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa358 (0x14ee0fa5c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5babb3e (0x14ee0fa5db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: c10d::TCPStore::compareSet(std::__cxx11::basic_string, std::allocator > const&, std::vector > const&, std::vector > const&) + 0x299 (0x14ee0fa57569 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: + 0xc0f919 (0x14ee1ed8b919 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #6: + 0x37f17d (0x14ee1e4fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #24: + 0x29d90 (0x14ee27a22d90 in /lib/x86_64-linux-gnu/libc.so.6) frame #25: __libc_start_main + 0x80 (0x14ee27a22e40 in /lib/x86_64-linux-gnu/libc.so.6) W0621 21:11:58.911000 3417967 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-870_3417967_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. Traceback (most recent call last): File "", line 198, in _run_module_as_main File "", line 88, in _run_code File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in main() File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper return arg(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main launch(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch W0621 21:11:58.913000 3349295 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-881_3349295_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. 
run(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run elastic_launch( File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__ return launch_agent(self._config, self._entrypoint, list(args)) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent raise ChildFailedError( torch.distributed.elastic.multiprocessing.errors.ChildFailedError: ============================================================ ./pretrain_gpt_profile.py FAILED ------------------------------------------------------------ Failures: ------------------------------------------------------------ Root Cause (first observed failure): [0]: time : 2025-06-21_21:11:58 host : fs-mbz-gpu-870 rank : 13 (local_rank: 5) exitcode : 1 (pid: 3418041) error_file: traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html ============================================================ [W621 21:11:58.283802534 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-881]:44374, remote=[fs-mbz-gpu-852]:29500): Broken pipe Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x14ee267785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x14ee0fa5aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa358 (0x14ee0fa5c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5babb3e (0x14ee0fa5db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: c10d::TCPStore::compareSet(std::__cxx11::basic_string, std::allocator > const&, std::vector > const&, std::vector > const&) + 0x299 (0x14ee0fa57569 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: + 0xc0f919 (0x14ee1ed8b919 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #6: + 0x37f17d (0x14ee1e4fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #24: + 0x29d90 (0x14ee27a22d90 in /lib/x86_64-linux-gnu/libc.so.6) frame #25: __libc_start_main + 0x80 (0x14ee27a22e40 in /lib/x86_64-linux-gnu/libc.so.6) W0621 21:11:58.921000 3349295 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-881_3349295_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. 
Traceback (most recent call last): File "", line 198, in _run_module_as_main File "", line 88, in _run_code File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in main() File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper return arg(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main launch(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch run(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run elastic_launch( File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__ return launch_agent(self._config, self._entrypoint, list(args)) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent raise ChildFailedError( torch.distributed.elastic.multiprocessing.errors.ChildFailedError: ============================================================ ./pretrain_gpt_profile.py FAILED ------------------------------------------------------------ Failures: ------------------------------------------------------------ Root Cause (first observed failure): [0]: time : 2025-06-21_21:11:57 host : fs-mbz-gpu-881 rank : 17 (local_rank: 1) exitcode : 1 (pid: 3349368) error_file: traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html ============================================================ + set +x + set +x + set +x + for ctx_length in 1024 2048 4096 8192 12288 16384 24576 32768 40960 49152 65536 81920 98304 131072 + export PROF_CTX_LENGTH=12288 + PROF_CTX_LENGTH=12288 + name='/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L12288*tp8.cp4.bs32.json' + '[' -f '/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L12288*tp8.cp4.bs32.json' ']' + echo 'Running ctx_length=12288, TP_SIZE=8, CP_SIZE=4, BATCH_SIZE=32' + srun bash ./attnserver.sh + which python3 + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 1 --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 12288 --max-position-embeddings 12288 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/ + which python3 + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 2 --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 
--context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 12288 --max-position-embeddings 12288 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/ + which python3 + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 3 --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 12288 --max-position-embeddings 12288 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/ + which python3 + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 0 --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 12288 --max-position-embeddings 12288 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated and will be removed in future. Use torchrun. Note that --use-env is set by default in torchrun. If your script expects `--local-rank` argument to be set, please change it to read from `os.environ['LOCAL_RANK']` instead. See https://pytorch.org/docs/stable/distributed.html#launch-utility for further instructions main() W0621 21:12:01.721000 130872 site-packages/torch/distributed/run.py:766] W0621 21:12:01.721000 130872 site-packages/torch/distributed/run.py:766] ***************************************** W0621 21:12:01.721000 130872 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. W0621 21:12:01.721000 130872 site-packages/torch/distributed/run.py:766] ***************************************** /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated and will be removed in future. 
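As a sanity check on the launch command above: 4 nodes x 8 processes gives 32 ranks, which tensor parallel 8 and context parallel 4 consume entirely, leaving a single data-parallel replica, and context parallelism splits the 12288-token sequence across the 4 CP ranks. A quick arithmetic sketch under those assumptions:

# Layout implied by the launch command above (assuming world = nnodes * nproc_per_node
# and data parallel = world // (TP * CP), as in Megatron's usual decomposition).
nnodes, nproc_per_node = 4, 8
tp, cp, seq_len = 8, 4, 12288

world_size = nnodes * nproc_per_node      # 32 ranks
dp = world_size // (tp * cp)              # 1 data-parallel replica
tokens_per_cp_rank = seq_len // cp        # 3072 tokens of the sequence per CP rank
print(world_size, dp, tokens_per_cp_rank)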
Use torchrun. Note that --use-env is set by default in torchrun. If your script expects `--local-rank` argument to be set, please change it to read from `os.environ['LOCAL_RANK']` instead. See https://pytorch.org/docs/stable/distributed.html#launch-utility for further instructions main() W0621 21:12:01.887000 2050421 site-packages/torch/distributed/run.py:766] W0621 21:12:01.887000 2050421 site-packages/torch/distributed/run.py:766] ***************************************** W0621 21:12:01.887000 2050421 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. W0621 21:12:01.887000 2050421 site-packages/torch/distributed/run.py:766] ***************************************** /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated and will be removed in future. Use torchrun. Note that --use-env is set by default in torchrun. If your script expects `--local-rank` argument to be set, please change it to read from `os.environ['LOCAL_RANK']` instead. See https://pytorch.org/docs/stable/distributed.html#launch-utility for further instructions main() W0621 21:12:01.905000 3351114 site-packages/torch/distributed/run.py:766] W0621 21:12:01.905000 3351114 site-packages/torch/distributed/run.py:766] ***************************************** W0621 21:12:01.905000 3351114 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. W0621 21:12:01.905000 3351114 site-packages/torch/distributed/run.py:766] ***************************************** /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated and will be removed in future. Use torchrun. Note that --use-env is set by default in torchrun. If your script expects `--local-rank` argument to be set, please change it to read from `os.environ['LOCAL_RANK']` instead. See https://pytorch.org/docs/stable/distributed.html#launch-utility for further instructions main() W0621 21:12:01.914000 3419786 site-packages/torch/distributed/run.py:766] W0621 21:12:01.914000 3419786 site-packages/torch/distributed/run.py:766] ***************************************** W0621 21:12:01.914000 3419786 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. W0621 21:12:01.914000 3419786 site-packages/torch/distributed/run.py:766] ***************************************** [rank16]:[W621 21:12:25.626333470 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 16] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank8]:[W621 21:12:25.183468844 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 8] using GPU 0 as device used by this process is currently unknown. 
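The FutureWarning above asks for two migrations: launch with torchrun instead of python -m torch.distributed.launch, and read the local rank from the environment instead of a --local-rank argument (the rendezvous flags carry over unchanged). A minimal sketch of the code-side half, for a hypothetical script rather than pretrain_gpt_profile.py itself:

import os
import torch
import torch.distributed as dist

def init_distributed() -> int:
    # torchrun exports LOCAL_RANK / RANK / WORLD_SIZE for every worker,
    # so no --local-rank command-line argument is needed.
    local_rank = int(os.environ.get("LOCAL_RANK", "0"))
    torch.cuda.set_device(local_rank)
    dist.init_process_group(backend="nccl")
    return local_rank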
This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank0]:[W621 21:12:25.322491886 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 0] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank1]:[W621 21:12:25.605888535 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 1] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank9]:[W621 21:12:25.492785679 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 9] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank17]:[W621 21:12:25.946445037 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 17] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank25]:[W621 21:12:25.016937210 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 25] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank13]:[W621 21:12:25.504401789 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 13] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank5]:[W621 21:12:25.618829136 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 5] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank29]:[W621 21:12:25.027131082 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 29] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank21]:[W621 21:12:25.958972447 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 21] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank24]:[W621 21:12:25.027925905 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 24] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank19]:[W621 21:12:25.974327966 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 19] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. 
You can pecify device_id in init_process_group() to force use of a particular device. [rank3]:[W621 21:12:25.635053763 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 3] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank15]:[W621 21:12:25.522594851 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 15] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank7]:[W621 21:12:25.636197703 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 7] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank11]:[W621 21:12:25.522806126 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 11] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank23]:[W621 21:12:25.976534552 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 23] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank2]:[W621 21:12:25.637547131 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 2] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank10]:[W621 21:12:25.525144686 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 10] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank6]:[W621 21:12:25.639207461 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 6] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank4]:[W621 21:12:25.639501817 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 4] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank12]:[W621 21:12:25.526386194 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 12] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank20]:[W621 21:12:25.980498523 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 20] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. 
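The "using GPU N as device ... is currently unknown" warnings above are addressed by telling the process group which device each rank owns at init time; recent PyTorch releases accept a device_id argument to init_process_group for exactly this. A sketch under that assumption (older releases only support calling torch.cuda.set_device beforehand):

import os
import torch
import torch.distributed as dist

local_rank = int(os.environ.get("LOCAL_RANK", "0"))
device = torch.device(f"cuda:{local_rank}")
torch.cuda.set_device(device)

# Passing device_id pins the rank-to-GPU mapping up front, which is what the
# warning asks for, and lets NCCL initialize communicators eagerly.
dist.init_process_group(backend="nccl", device_id=device)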
[rank14]:[W621 21:12:25.528451121 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 14] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank22]:[W621 21:12:25.981626129 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 22] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank27]:[W621 21:12:25.051597693 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 27] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank26]:[W621 21:12:25.052141117 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 26] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank31]:[W621 21:12:25.052144225 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 31] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank28]:[W621 21:12:25.052243357 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 28] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank18]:[W621 21:12:25.984422259 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 18] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank30]:[W621 21:12:25.056420356 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 30] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. 
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
  warnings.warn(
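The gpt_layer_specs warning above is emitted once per process because the layer spec is still built with the deprecated fp8 keyword. A hedged sketch of the updated call, assuming a recent megatron-core in which the FP8 recipe is selected through the training configuration (for example --fp8-format) rather than on the spec helper; exact option names may differ across versions:

```python
# Sketch only: drop the deprecated keyword from the spec helper call.
# Assumes recent megatron-core; the FP8 recipe is configured elsewhere
# (e.g. --fp8-format hybrid on the training command line), not here.
from megatron.core.models.gpt.gpt_layer_specs import (
    get_gpt_layer_with_transformer_engine_spec,
)

# Deprecated pattern that triggers the UserWarning above:
# layer_spec = get_gpt_layer_with_transformer_engine_spec(fp8="hybrid")

# Updated pattern: build the spec without the fp8 argument.
layer_spec = get_gpt_layer_with_transformer_engine_spec()
```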
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
  warnings.warn(
[rank7]: Traceback (most recent call last):
[rank7]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
[rank7]:     pretrain(
[rank7]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain
[rank7]:     iteration, num_floating_point_operations_so_far = train(
[rank7]:                                                       ^^^^^^
[rank7]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train
[rank7]:     ) = train_step(
[rank7]:         ^^^^^^^^^^^
[rank7]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step
[rank7]:     losses_reduced = forward_backward_func(
[rank7]:                      ^^^^^^^^^^^^^^^^^^^^^^
[rank7]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining
[rank7]:     output_tensor, num_tokens = forward_step(
[rank7]:                                 ^^^^^^^^^^^^^
[rank7]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step
[rank7]:     output_tensor, loss_func = forward_step_func(data_iterator, model)
[rank7]:                                ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
[rank7]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step
[rank7]:     (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)
[rank7]:                                                                             ^^^^^^^^^^^^^^^^^^^^^^^^
[rank7]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch
[rank7]:     batch = next(global_batches)
[rank7]:             ^^^^^^^^^^^^^^^^^^^^
[rank7]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches
[rank7]:     attention_mask = torch.ones(
[rank7]:                      ^^^^^^^^^^^
[rank7]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 4608.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 137.06 GiB is free. Including non-PyTorch memory, this process has 2.75 GiB memory in use. Of the allocated memory 1.13 GiB is allocated by PyTorch, and 103.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
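The 4608.00 GiB figure is explained by the failing line: setup_batches materializes a full seq_len x seq_len attention mask with torch.ones, so its size grows with the square of the sequence length. The sketch below redoes that arithmetic and shows the usual way to avoid building the mask at all; the micro-batch size and sequence length are illustrative assumptions chosen to reproduce the reported number, not values read from the failing run.

```python
# Back-of-the-envelope check of the 4608.00 GiB allocation, using assumed shapes.
import torch
import torch.nn.functional as F

GIB = 1024 ** 3


def dense_mask_bytes(micro_batch: int, seq_len: int, dtype=torch.bool) -> int:
    """Bytes needed for a dense [micro_batch, 1, seq_len, seq_len] attention mask."""
    elem = torch.tensor([], dtype=dtype).element_size()  # 1 byte per bool element
    return micro_batch * seq_len * seq_len * elem


# Example only: micro-batch 2 at a ~1.5M-token sequence length reproduces the number.
print(dense_mask_bytes(2, 1_572_864) / GIB)   # -> 4608.0

# The usual fix is never to build the [S, S] mask: fused attention kernels accept
# a causal flag instead, e.g. scaled_dot_product_attention(..., is_causal=True).
q = k = v = torch.randn(1, 8, 1024, 64)        # tiny illustrative tensors
out = F.scaled_dot_product_attention(q, k, v, is_causal=True)
print(out.shape)                               # torch.Size([1, 8, 1024, 64])
```

Note that the PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True hint in the error message only mitigates fragmentation (here just 103.51 MiB is reserved but unallocated); it cannot help when a single requested tensor of roughly 4.5 TiB exceeds the 139.81 GiB device many times over.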
[rank4]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 4608.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 137.07 GiB is free. Including non-PyTorch memory, this process has 2.73 GiB memory in use. Of the allocated memory 1.13 GiB is allocated by PyTorch, and 103.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
[rank0]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 4608.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 137.07 GiB is free. Including non-PyTorch memory, this process has 2.73 GiB memory in use. Of the allocated memory 1.13 GiB is allocated by PyTorch, and 103.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
[rank12]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 4608.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 137.04 GiB is free. Including non-PyTorch memory, this process has 2.77 GiB memory in use. Of the allocated memory 1.13 GiB is allocated by PyTorch, and 103.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
[rank31]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 4608.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 137.07 GiB is free. Including non-PyTorch memory, this process has 2.73 GiB memory in use. Of the allocated memory 1.13 GiB is allocated by PyTorch, and 103.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
[rank11]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 4608.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 137.05 GiB is free. Including non-PyTorch memory, this process has 2.75 GiB memory in use. Of the allocated memory 1.13 GiB is allocated by PyTorch, and 103.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
[rank30]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 4608.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 137.06 GiB is free. Including non-PyTorch memory, this process has 2.75 GiB memory in use. Of the allocated memory 1.13 GiB is allocated by PyTorch, and 103.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
[rank3]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 4608.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 137.06 GiB is free. Including non-PyTorch memory, this process has 2.75 GiB memory in use. Of the allocated memory 1.13 GiB is allocated by PyTorch, and 103.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
[rank23]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 4608.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 137.04 GiB is free. Including non-PyTorch memory, this process has 2.77 GiB memory in use. Of the allocated memory 1.13 GiB is allocated by PyTorch, and 103.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
[rank6]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 4608.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 137.07 GiB is free. Including non-PyTorch memory, this process has 2.73 GiB memory in use. Of the allocated memory 1.13 GiB is allocated by PyTorch, and 103.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
[rank5]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 4608.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 137.06 GiB is free. Including non-PyTorch memory, this process has 2.75 GiB memory in use. Of the allocated memory 1.13 GiB is allocated by PyTorch, and 103.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
[rank17]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 4608.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 137.04 GiB is free. Including non-PyTorch memory, this process has 2.77 GiB memory in use. Of the allocated memory 1.13 GiB is allocated by PyTorch, and 103.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
[rank2]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 4608.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 137.07 GiB is free. Including non-PyTorch memory, this process has 2.73 GiB memory in use. Of the allocated memory 1.13 GiB is allocated by PyTorch, and 103.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
[rank10]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 4608.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 137.04 GiB is free. Including non-PyTorch memory, this process has 2.77 GiB memory in use. Of the allocated memory 1.13 GiB is allocated by PyTorch, and 103.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
[rank1]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 4608.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 137.06 GiB is free. Including non-PyTorch memory, this process has 2.75 GiB memory in use. Of the allocated memory 1.13 GiB is allocated by PyTorch, and 103.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
[rank9]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 4608.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 137.05 GiB is free. Including non-PyTorch memory, this process has 2.75 GiB memory in use. Of the allocated memory 1.13 GiB is allocated by PyTorch, and 103.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
[rank16]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 4608.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 137.05 GiB is free. Including non-PyTorch memory, this process has 2.75 GiB memory in use. Of the allocated memory 1.13 GiB is allocated by PyTorch, and 103.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
[rank25]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 4608.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 137.07 GiB is free. Including non-PyTorch memory, this process has 2.73 GiB memory in use. Of the allocated memory 1.13 GiB is allocated by PyTorch, and 103.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
[rank18]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 4608.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 137.05 GiB is free. Including non-PyTorch memory, this process has 2.75 GiB memory in use. Of the allocated memory 1.13 GiB is allocated by PyTorch, and 103.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
[rank14]: torch.OutOfMemoryError: CUDA
out of memory. Tried to allocate 4608.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 137.04 GiB is free. Including non-PyTorch memory, this process has 2.77 GiB memory in use. Of the allocated memory 1.13 GiB is allocated by PyTorch, and 103.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank26]: batch = next(global_batches) [rank26]: ^^^^^^^^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank26]: attention_mask = torch.ones( [rank26]: ^^^^^^^^^^^ [rank26]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 4608.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 137.06 GiB is free. Including non-PyTorch memory, this process has 2.75 GiB memory in use. Of the allocated memory 1.13 GiB is allocated by PyTorch, and 103.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank15]: Traceback (most recent call last): [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank15]: pretrain( [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank15]: iteration, num_floating_point_operations_so_far = train( [rank15]: ^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank15]: ) = train_step( [rank15]: ^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank15]: losses_reduced = forward_backward_func( [rank15]: ^^^^^^^^^^^^^^^^^^^^^^ [rank19]: Traceback (most recent call last): [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank19]: pretrain( [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank19]: iteration, num_floating_point_operations_so_far = train( [rank19]: ^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank19]: ) = train_step( [rank19]: ^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank19]: losses_reduced = forward_backward_func( [rank19]: ^^^^^^^^^^^^^^^^^^^^^^ [rank27]: Traceback (most recent call last): [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank27]: pretrain( [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank27]: iteration, num_floating_point_operations_so_far = train( [rank27]: ^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank27]: ) = train_step( [rank27]: ^^^^^^^^^^^ [rank27]: 
File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank27]: losses_reduced = forward_backward_func( [rank27]: ^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank15]: output_tensor, num_tokens = forward_step( [rank15]: ^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank15]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank15]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank19]: output_tensor, num_tokens = forward_step( [rank19]: ^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank19]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank19]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank27]: output_tensor, num_tokens = forward_step( [rank27]: ^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank27]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank27]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank15]: batch = next(global_batches) [rank15]: ^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank15]: attention_mask = torch.ones( [rank15]: ^^^^^^^^^^^ [rank15]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 4608.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 137.05 GiB is free. Including non-PyTorch memory, this process has 2.75 GiB memory in use. Of the allocated memory 1.13 GiB is allocated by PyTorch, and 103.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank19]: batch = next(global_batches) [rank19]: ^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank19]: attention_mask = torch.ones( [rank19]: ^^^^^^^^^^^ [rank19]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 4608.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 137.04 GiB is free. Including non-PyTorch memory, this process has 2.77 GiB memory in use. Of the allocated memory 1.13 GiB is allocated by PyTorch, and 103.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank27]: batch = next(global_batches) [rank27]: ^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank27]: attention_mask = torch.ones( [rank27]: ^^^^^^^^^^^ [rank27]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 4608.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 137.07 GiB is free. Including non-PyTorch memory, this process has 2.73 GiB memory in use. Of the allocated memory 1.13 GiB is allocated by PyTorch, and 103.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank13]: Traceback (most recent call last): [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank13]: pretrain( [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank13]: iteration, num_floating_point_operations_so_far = train( [rank13]: ^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank13]: ) = train_step( [rank13]: ^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank13]: losses_reduced = forward_backward_func( [rank13]: ^^^^^^^^^^^^^^^^^^^^^^ [rank22]: Traceback (most recent call last): [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank22]: pretrain( [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank22]: iteration, num_floating_point_operations_so_far = train( [rank22]: ^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank22]: ) = train_step( [rank22]: ^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank22]: losses_reduced = forward_backward_func( [rank22]: ^^^^^^^^^^^^^^^^^^^^^^ [rank29]: Traceback (most recent call last): [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank29]: pretrain( [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank29]: iteration, num_floating_point_operations_so_far = train( [rank29]: ^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank29]: ) = train_step( [rank29]: ^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank29]: losses_reduced = forward_backward_func( [rank29]: ^^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank13]: output_tensor, num_tokens = forward_step( [rank13]: ^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank22]: output_tensor, num_tokens = forward_step( [rank22]: ^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank22]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank22]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank22]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank22]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank29]: output_tensor, num_tokens = forward_step( [rank29]: ^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank29]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank29]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank13]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank13]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank13]: batch = next(global_batches) [rank13]: ^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank13]: attention_mask = torch.ones( [rank13]: ^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank22]: batch = next(global_batches) [rank22]: ^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank22]: attention_mask = torch.ones( [rank22]: ^^^^^^^^^^^ [rank22]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 4608.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 137.05 GiB is free. Including non-PyTorch memory, this process has 2.75 GiB memory in use. Of the allocated memory 1.13 GiB is allocated by PyTorch, and 103.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank29]: batch = next(global_batches) [rank29]: ^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank29]: attention_mask = torch.ones( [rank29]: ^^^^^^^^^^^ [rank29]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 4608.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 137.07 GiB is free. Including non-PyTorch memory, this process has 2.73 GiB memory in use. Of the allocated memory 1.13 GiB is allocated by PyTorch, and 103.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank13]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 4608.00 GiB. 
GPU 5 has a total capacity of 139.81 GiB of which 137.05 GiB is free. Including non-PyTorch memory, this process has 2.75 GiB memory in use. Of the allocated memory 1.13 GiB is allocated by PyTorch, and 103.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank20]: Traceback (most recent call last): [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank20]: pretrain( [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank20]: iteration, num_floating_point_operations_so_far = train( [rank20]: ^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank20]: ) = train_step( [rank20]: ^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank20]: losses_reduced = forward_backward_func( [rank20]: ^^^^^^^^^^^^^^^^^^^^^^ [rank28]: Traceback (most recent call last): [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank28]: pretrain( [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank28]: iteration, num_floating_point_operations_so_far = train( [rank28]: ^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank28]: ) = train_step( [rank28]: ^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank28]: losses_reduced = forward_backward_func( [rank28]: ^^^^^^^^^^^^^^^^^^^^^^ [rank8]: Traceback (most recent call last): [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank8]: pretrain( [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank8]: iteration, num_floating_point_operations_so_far = train( [rank8]: ^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank8]: ) = train_step( [rank8]: ^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank8]: losses_reduced = forward_backward_func( [rank8]: ^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank20]: output_tensor, num_tokens = forward_step( [rank20]: ^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank20]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank20]: (tokens, labels, 
loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank28]: output_tensor, num_tokens = forward_step( [rank28]: ^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank8]: output_tensor, num_tokens = forward_step( [rank8]: ^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank8]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank8]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank8]: batch = next(global_batches) [rank8]: ^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank20]: batch = next(global_batches) [rank20]: ^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank20]: attention_mask = torch.ones( [rank20]: ^^^^^^^^^^^ [rank20]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 4608.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 137.05 GiB is free. Including non-PyTorch memory, this process has 2.75 GiB memory in use. Of the allocated memory 1.13 GiB is allocated by PyTorch, and 103.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank28]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank28]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank28]: batch = next(global_batches) [rank28]: ^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank28]: attention_mask = torch.ones( [rank28]: ^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank8]: attention_mask = torch.ones( [rank8]: ^^^^^^^^^^^ [rank8]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 4608.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 137.04 GiB is free. Including non-PyTorch memory, this process has 2.77 GiB memory in use. Of the allocated memory 1.13 GiB is allocated by PyTorch, and 103.51 MiB is reserved by PyTorch but unallocated. 
If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank21]: Traceback (most recent call last): [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank21]: pretrain( [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank21]: iteration, num_floating_point_operations_so_far = train( [rank21]: ^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank21]: ) = train_step( [rank21]: ^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank21]: losses_reduced = forward_backward_func( [rank21]: ^^^^^^^^^^^^^^^^^^^^^^ [rank28]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 4608.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 137.06 GiB is free. Including non-PyTorch memory, this process has 2.75 GiB memory in use. Of the allocated memory 1.13 GiB is allocated by PyTorch, and 103.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank21]: output_tensor, num_tokens = forward_step( [rank21]: ^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank21]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank21]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: Traceback (most recent call last): [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank24]: pretrain( [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank24]: iteration, num_floating_point_operations_so_far = train( [rank24]: ^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank24]: ) = train_step( [rank24]: ^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank24]: losses_reduced = forward_backward_func( [rank24]: ^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank21]: batch = next(global_batches) [rank21]: ^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank21]: attention_mask = torch.ones( [rank21]: ^^^^^^^^^^^ [rank21]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 4608.00 GiB. 
GPU 5 has a total capacity of 139.81 GiB of which 137.04 GiB is free. Including non-PyTorch memory, this process has 2.77 GiB memory in use. Of the allocated memory 1.13 GiB is allocated by PyTorch, and 103.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank24]: output_tensor, num_tokens = forward_step( [rank24]: ^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank24]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank24]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank24]: batch = next(global_batches) [rank24]: ^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank24]: attention_mask = torch.ones( [rank24]: ^^^^^^^^^^^ [rank24]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 4608.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 137.06 GiB is free. Including non-PyTorch memory, this process has 2.75 GiB memory in use. Of the allocated memory 1.13 GiB is allocated by PyTorch, and 103.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank7]:[W621 21:12:35.333502590 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank6]:[W621 21:12:35.353208097 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank2]:[W621 21:12:35.386168588 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank15]:[W621 21:12:35.312456150 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank29]:[W621 21:12:35.843454839 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. 
For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank4]:[W621 21:12:35.447717557 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank11]:[W621 21:12:35.386659068 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank9]:[W621 21:12:35.387634726 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank1]:[W621 21:12:35.505720325 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank3]:[W621 21:12:35.507009801 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank28]:[W621 21:12:35.924412893 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank31]:[W621 21:12:35.924631038 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank14]:[W621 21:12:35.407635924 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank19]:[W621 21:12:35.880517453 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank27]:[W621 21:12:35.958796506 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank26]:[W621 21:12:35.976861554 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank25]:[W621 21:12:35.980157438 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank5]:[W621 21:12:35.576931746 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. 
For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank30]:[W621 21:12:35.989601791 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank20]:[W621 21:12:35.936935541 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank18]:[W621 21:12:35.938682411 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank13]:[W621 21:12:35.574283744 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank22]:[W621 21:12:35.051986617 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank10]:[W621 21:12:35.617275971 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank21]:[W621 21:12:35.107925605 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank17]:[W621 21:12:35.123076738 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank12]:[W621 21:12:35.685433633 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank23]:[W621 21:12:35.148041408 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. 
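[editor's note] Every rank dies at the same allocation: setup_batches materializes a dense attention mask with torch.ones, and at this sequence length that single tensor is 4608 GiB, far beyond a 139.81 GiB card. The allocator hint about PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True cannot help here, since fragmentation is not the issue and the request simply does not fit. The micro-batch size and sequence length are not visible in this section; the helper below is hypothetical (not code from pretrain_gpt_profile.py) and only shows shape/dtype combinations consistent with the reported 4608.00 GiB.

import torch

def mask_gib(batch, seq, dtype=torch.bool):
    # Size of a dense [batch, 1, seq, seq] mask like the one built by
    # attention_mask = torch.ones(...) in setup_batches.
    elem = torch.tensor([], dtype=dtype).element_size()
    return batch * seq * seq * elem / 1024**3

# Hypothetical combinations that land exactly on the reported figure:
print(mask_gib(2, 1_572_864, torch.bool))     # 4608.0 (2 x 1.5M x 1.5M x 1 byte)
print(mask_gib(1, 1_572_864, torch.float16))  # 4608.0 (1 x 1.5M x 1.5M x 2 bytes)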
W0621 21:12:36.121000 130872 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 130944 closing signal SIGTERM
[the four elastic agents then sent SIGTERM to the rest of their workers: agent 130872 to pids 130945-130950, agent 3419786 to pids 3419856-3419862, agent 3351114 to pids 3351184-3351186 and 3351188-3351191, and agent 2050421 to pids 2050490-2050493, 2050495 and 2050496]
E0621 21:12:37.145000 130872 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 7 (pid: 130951) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
Traceback (most recent call last):
  File "<frozen runpy>", line 198, in _run_module_as_main
  File "<frozen runpy>", line 88, in _run_code
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in <module>
    main()
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
    return arg(*args, **kwargs)
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main
    launch(args)
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch
    run(args)
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run
    elastic_launch(
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__
    return launch_agent(self._config, self._entrypoint, list(args))
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent
    raise ChildFailedError(
torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
============================================================
./pretrain_gpt_profile.py FAILED
------------------------------------------------------------
Failures:
  <NO_OTHER_FAILURES>
------------------------------------------------------------
Root Cause (first observed failure):
[0]:
  time      : 2025-06-21_21:12:36
  host      : fs-mbz-gpu-852
  rank      : 7 (local_rank: 7)
  exitcode  : 1 (pid: 130951)
  error_file: <N/A>
  traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
============================================================
+ set +x
E0621 21:12:37.516000 3419786 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 7 (pid: 3419863) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
W0621 21:12:37.527000 3419786 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-870_3419786_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError.
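[editor's note] The launcher summary above carries no error_file and only points at https://pytorch.org/docs/stable/elastic/errors.html. Per that page, decorating the worker entrypoint with torch.distributed.elastic.multiprocessing.errors.record makes each failing rank serialize its exception into a per-rank error file that torchrun then folds into this summary. A minimal sketch; the main() below is a stand-in, not the actual pretrain_gpt_profile.py entrypoint.

from torch.distributed.elastic.multiprocessing.errors import record

@record  # uncaught exceptions are written to the per-rank error file torchrun reports
def main():
    # stand-in for the real work, e.g. pretrain(...) in pretrain_gpt_profile.py
    raise RuntimeError("simulated worker failure")

if __name__ == "__main__":
    main()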
[W621 21:12:37.446368607 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-870]:58628, remote=[fs-mbz-gpu-852]:29500): Broken pipe
Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first):
frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x14ab1eb785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
frame #4: c10d::TCPStore::doWait(c10::ArrayRef, std::allocator > >, std::chrono::duration >) + 0x1a6 (0x14ab07a57ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string, std::allocator > const&) + 0x33 (0x14ab07a57ea3 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
frame #6: c10d::TCPStore::get(std::__cxx11::basic_string, std::allocator > const&) + 0xab (0x14ab07a58f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
[frames #1-#3, #7-#8 and #26-#27 are anonymous offsets in libtorch_cpu.so, libtorch_python.so and libc.so.6 ending in __libc_start_main]
W0621 21:12:37.539000 3419786 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-870_3419786_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError.
[an identical sendBytes / Broken pipe backtrace followed at 21:12:37.457129103]
W0621 21:12:37.549000 3419786 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-870_3419786_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError.
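[editor's note] The Broken pipe / RendezvousConnectionError messages are secondary damage: the rendezvous TCPStore for '343200' lives on fs-mbz-gpu-852:29500, and once that node's workers die the other agents can no longer reach it while shutting down. The earlier destroy_process_group() warnings come from the same abrupt exit. A sketch of the usual teardown pattern, with illustrative names rather than the structure of pretrain_gpt_profile.py:

import torch.distributed as dist

def train_loop():
    pass  # placeholder for the actual training iterations

def main():
    # assumes launch via torchrun with NCCL available
    dist.init_process_group(backend="nccl")
    try:
        train_loop()
    finally:
        # Explicit teardown releases NCCL communicators and avoids the
        # "destroy_process_group() was not called before program exit" warning,
        # even when training raises (e.g. the OOM above).
        dist.destroy_process_group()

if __name__ == "__main__":
    main()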
Traceback (most recent call last): File "", line 198, in _run_module_as_main File "", line 88, in _run_code File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in main() File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper return arg(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main launch(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch run(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run elastic_launch( File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__ return launch_agent(self._config, self._entrypoint, list(args)) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent raise ChildFailedError( torch.distributed.elastic.multiprocessing.errors.ChildFailedError: ============================================================ ./pretrain_gpt_profile.py FAILED ------------------------------------------------------------ Failures: ------------------------------------------------------------ Root Cause (first observed failure): [0]: time : 2025-06-21_21:12:36 host : fs-mbz-gpu-870 rank : 15 (local_rank: 7) exitcode : 1 (pid: 3419863) error_file: traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html ============================================================ + set +x E0621 21:12:37.819000 2050421 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 4 (pid: 2050494) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 Traceback (most recent call last): File "", line 198, in _run_module_as_main File "", line 88, in _run_code File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in main() File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper return arg(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main launch(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch run(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run elastic_launch( File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__ return launch_agent(self._config, self._entrypoint, list(args)) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent raise ChildFailedError( torch.distributed.elastic.multiprocessing.errors.ChildFailedError: ============================================================ ./pretrain_gpt_profile.py FAILED ------------------------------------------------------------ Failures: [1]: time : 2025-06-21_21:12:36 host : fs-mbz-gpu-901 rank : 31 (local_rank: 7) exitcode : 1 (pid: 2050497) error_file: traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html ------------------------------------------------------------ Root Cause (first observed failure): [0]: time : 2025-06-21_21:12:36 host : fs-mbz-gpu-901 rank : 28 (local_rank: 4) exitcode : 1 (pid: 2050494) error_file: traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html ============================================================ E0621 21:12:37.920000 3351114 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 3 (pid: 3351187) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 W0621 21:12:37.932000 3351114 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-881_3351114_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. [W621 21:12:37.303671757 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-881]:33820, remote=[fs-mbz-gpu-852]:29500): Broken pipe Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x14de379785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x14de2085aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa358 (0x14de2085c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5babb3e (0x14de2085db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: c10d::TCPStore::doWait(c10::ArrayRef, std::allocator > >, std::chrono::duration >) + 0x1a6 (0x14de20857ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string, std::allocator > const&) + 0x33 (0x14de20857ea3 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #6: c10d::TCPStore::get(std::__cxx11::basic_string, std::allocator > const&) + 0xab (0x14de20858f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #7: + 0xc0f526 (0x14de2fb8b526 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #8: + 0x37f17d (0x14de2f2fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #26: + 0x29d90 (0x14de38a18d90 in /lib/x86_64-linux-gnu/libc.so.6) frame 
#27: __libc_start_main + 0x80 (0x14de38a18e40 in /lib/x86_64-linux-gnu/libc.so.6) W0621 21:12:37.944000 3351114 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-881_3351114_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. [W621 21:12:37.315110454 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-881]:33820, remote=[fs-mbz-gpu-852]:29500): Broken pipe Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x14de379785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x14de2085aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa358 (0x14de2085c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5babb3e (0x14de2085db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: c10d::TCPStore::doWait(c10::ArrayRef, std::allocator > >, std::chrono::duration >) + 0x1a6 (0x14de20857ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string, std::allocator > const&) + 0x33 (0x14de20857ea3 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #6: c10d::TCPStore::get(std::__cxx11::basic_string, std::allocator > const&) + 0xab (0x14de20858f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #7: + 0xc0f526 (0x14de2fb8b526 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #8: + 0x37f17d (0x14de2f2fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #26: + 0x29d90 (0x14de38a18d90 in /lib/x86_64-linux-gnu/libc.so.6) frame #27: __libc_start_main + 0x80 (0x14de38a18e40 in /lib/x86_64-linux-gnu/libc.so.6) W0621 21:12:37.954000 3351114 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-881_3351114_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. 
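Every ChildFailedError summary in this log shows an empty error_file and defers to https://pytorch.org/docs/stable/elastic/errors.html for enabling tracebacks. That page prescribes wrapping the script's entry point with the `record` decorator so the failing worker's traceback is written to an error file and echoed in the summary. A hedged sketch of the pattern follows; the `main` body is a placeholder, not the actual code of ./pretrain_gpt_profile.py.

# Sketch: surface per-rank tracebacks in the elastic error summary.
# `record` is the documented helper from torch.distributed.elastic; the body of
# main() below is a stand-in for whatever pretrain_gpt_profile.py actually does.
from torch.distributed.elastic.multiprocessing.errors import record

@record
def main():
    ...  # e.g. call pretrain(...) here

if __name__ == "__main__":
    main()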
Traceback (most recent call last): File "", line 198, in _run_module_as_main File "", line 88, in _run_code File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in main() File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper return arg(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main launch(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch run(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run elastic_launch( File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__ return launch_agent(self._config, self._entrypoint, list(args)) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent raise ChildFailedError( torch.distributed.elastic.multiprocessing.errors.ChildFailedError: ============================================================ ./pretrain_gpt_profile.py FAILED ------------------------------------------------------------ Failures: ------------------------------------------------------------ Root Cause (first observed failure): [0]: time : 2025-06-21_21:12:36 host : fs-mbz-gpu-881 rank : 19 (local_rank: 3) exitcode : 1 (pid: 3351187) error_file: traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html ============================================================ + set +x + set +x + for ctx_length in 1024 2048 4096 8192 12288 16384 24576 32768 40960 49152 65536 81920 98304 131072 + export PROF_CTX_LENGTH=16384 + PROF_CTX_LENGTH=16384 + name='/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L16384*tp8.cp4.bs32.json' + '[' -f '/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L16384*tp8.cp4.bs32.json' ']' + echo 'Running ctx_length=16384, TP_SIZE=8, CP_SIZE=4, BATCH_SIZE=32' + srun bash ./attnserver.sh + which python3 + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 3 --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 16384 --max-position-embeddings 16384 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/ + which python3 + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 1 --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 
--context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 16384 --max-position-embeddings 16384 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/ + which python3 + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 2 --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 16384 --max-position-embeddings 16384 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/ + which python3 + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 0 --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 16384 --max-position-embeddings 16384 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated and will be removed in future. Use torchrun. Note that --use-env is set by default in torchrun. If your script expects `--local-rank` argument to be set, please change it to read from `os.environ['LOCAL_RANK']` instead. See https://pytorch.org/docs/stable/distributed.html#launch-utility for further instructions main() W0621 21:12:40.834000 132761 site-packages/torch/distributed/run.py:766] W0621 21:12:40.834000 132761 site-packages/torch/distributed/run.py:766] ***************************************** W0621 21:12:40.834000 132761 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. W0621 21:12:40.834000 132761 site-packages/torch/distributed/run.py:766] ***************************************** /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated and will be removed in future. 
Use torchrun. Note that --use-env is set by default in torchrun. If your script expects `--local-rank` argument to be set, please change it to read from `os.environ['LOCAL_RANK']` instead. See https://pytorch.org/docs/stable/distributed.html#launch-utility for further instructions main() W0621 21:12:40.839000 3421622 site-packages/torch/distributed/run.py:766] W0621 21:12:40.839000 3421622 site-packages/torch/distributed/run.py:766] ***************************************** W0621 21:12:40.839000 3421622 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. W0621 21:12:40.839000 3421622 site-packages/torch/distributed/run.py:766] ***************************************** /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated and will be removed in future. Use torchrun. Note that --use-env is set by default in torchrun. If your script expects `--local-rank` argument to be set, please change it to read from `os.environ['LOCAL_RANK']` instead. See https://pytorch.org/docs/stable/distributed.html#launch-utility for further instructions main() W0621 21:12:40.849000 3352950 site-packages/torch/distributed/run.py:766] W0621 21:12:40.849000 3352950 site-packages/torch/distributed/run.py:766] ***************************************** W0621 21:12:40.849000 3352950 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. W0621 21:12:40.849000 3352950 site-packages/torch/distributed/run.py:766] ***************************************** /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated and will be removed in future. Use torchrun. Note that --use-env is set by default in torchrun. If your script expects `--local-rank` argument to be set, please change it to read from `os.environ['LOCAL_RANK']` instead. See https://pytorch.org/docs/stable/distributed.html#launch-utility for further instructions main() W0621 21:12:40.857000 2052241 site-packages/torch/distributed/run.py:766] W0621 21:12:40.857000 2052241 site-packages/torch/distributed/run.py:766] ***************************************** W0621 21:12:40.857000 2052241 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. W0621 21:12:40.857000 2052241 site-packages/torch/distributed/run.py:766] ***************************************** [rank24]:[W621 21:13:04.598722996 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 24] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank0]:[W621 21:13:04.232962358 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 0] using GPU 0 as device used by this process is currently unknown. 
This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank8]:[W621 21:13:04.300018230 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 8] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank1]:[W621 21:13:04.502120673 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 1] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank25]:[W621 21:13:04.912121624 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 25] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank9]:[W621 21:13:04.391096435 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 9] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank17]:[W621 21:13:04.845289168 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 17] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank16]:[W621 21:13:04.845334313 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 16] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank2]:[W621 21:13:04.509745537 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 2] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank10]:[W621 21:13:04.397474192 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 10] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank26]:[W621 21:13:04.918985280 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 26] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank18]:[W621 21:13:04.851027675 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 18] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank13]:[W621 21:13:04.420620183 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 13] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. 
You can pecify device_id in init_process_group() to force use of a particular device. [rank29]:[W621 21:13:04.942472622 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 29] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank21]:[W621 21:13:04.873990713 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 21] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank5]:[W621 21:13:04.535087667 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 5] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank4]:[W621 21:13:04.542406259 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 4] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank12]:[W621 21:13:04.429884883 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 12] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank28]:[W621 21:13:04.951533094 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 28] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank6]:[W621 21:13:04.547004687 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 6] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank14]:[W621 21:13:04.435366105 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 14] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank30]:[W621 21:13:04.957250171 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 30] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank7]:[W621 21:13:04.551002416 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 7] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank3]:[W621 21:13:04.552229040 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 3] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. 
[rank27]:[W621 21:13:04.959845029 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 27] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank15]:[W621 21:13:04.439369928 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 15] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank31]:[W621 21:13:04.961181213 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 31] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank20]:[W621 21:13:04.892770126 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 20] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank19]:[W621 21:13:04.893030602 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 19] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank23]:[W621 21:13:04.893107545 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 23] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank11]:[W621 21:13:04.440824371 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 11] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank22]:[W621 21:13:04.897570240 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 22] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. 
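Two recurring warnings above point at the same small adjustment inside the training script: the launcher's FutureWarning asks scripts to read the local rank from `os.environ['LOCAL_RANK']` when moving to torchrun, and the ProcessGroupNCCL warning asks for an explicit `device_id` in `init_process_group()` so the rank-to-GPU mapping is known instead of "currently unknown". The sketch below shows that initialization assuming one GPU per local rank (8 per node, as in this job) and assuming it runs under the launcher so the usual env:// variables are already set; it is illustrative, not the actual Megatron initialization code.

# Sketch: explicit rank-to-GPU binding at process-group setup.
# Assumes one GPU per local rank and a launcher-provided environment
# (MASTER_ADDR, MASTER_PORT, RANK, WORLD_SIZE, LOCAL_RANK).
import os
import torch
import torch.distributed as dist

local_rank = int(os.environ["LOCAL_RANK"])      # set by torchrun / the launcher
device = torch.device("cuda", local_rank)
torch.cuda.set_device(device)

dist.init_process_group(
    backend="nccl",
    device_id=device,   # makes the rank-to-GPU mapping explicit, silencing the NCCL warning
)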
warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. 
Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. 
warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. 
warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. 
warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py:915: FutureWarning: `load_state_dict` is deprecated and will be removed in future versions. Please use `load` instead. checkpoint.load_state_dict( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py:915: FutureWarning: `load_state_dict` is deprecated and will be removed in future versions. Please use `load` instead. checkpoint.load_state_dict( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py:915: FutureWarning: `load_state_dict` is deprecated and will be removed in future versions. Please use `load` instead. 
checkpoint.load_state_dict( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py:915: FutureWarning: `load_state_dict` is deprecated and will be removed in future versions. Please use `load` instead. checkpoint.load_state_dict( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py:915: FutureWarning: `load_state_dict` is deprecated and will be removed in future versions. Please use `load` instead. checkpoint.load_state_dict( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py:915: FutureWarning: `load_state_dict` is deprecated and will be removed in future versions. Please use `load` instead. checkpoint.load_state_dict( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py:915: FutureWarning: `load_state_dict` is deprecated and will be removed in future versions. Please use `load` instead. checkpoint.load_state_dict( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py:915: FutureWarning: `load_state_dict` is deprecated and will be removed in future versions. Please use `load` instead. checkpoint.load_state_dict( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py:915: FutureWarning: `load_state_dict` is deprecated and will be removed in future versions. Please use `load` instead. checkpoint.load_state_dict( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py:915: FutureWarning: `load_state_dict` is deprecated and will be removed in future versions. Please use `load` instead. checkpoint.load_state_dict( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py:915: FutureWarning: `load_state_dict` is deprecated and will be removed in future versions. Please use `load` instead. checkpoint.load_state_dict( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py:915: FutureWarning: `load_state_dict` is deprecated and will be removed in future versions. Please use `load` instead. checkpoint.load_state_dict( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py:915: FutureWarning: `load_state_dict` is deprecated and will be removed in future versions. Please use `load` instead. checkpoint.load_state_dict( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py:915: FutureWarning: `load_state_dict` is deprecated and will be removed in future versions. Please use `load` instead. checkpoint.load_state_dict( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py:915: FutureWarning: `load_state_dict` is deprecated and will be removed in future versions. Please use `load` instead. checkpoint.load_state_dict( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py:915: FutureWarning: `load_state_dict` is deprecated and will be removed in future versions. Please use `load` instead. checkpoint.load_state_dict( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py:915: FutureWarning: `load_state_dict` is deprecated and will be removed in future versions. Please use `load` instead. 
checkpoint.load_state_dict( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py:915: FutureWarning: `load_state_dict` is deprecated and will be removed in future versions. Please use `load` instead. checkpoint.load_state_dict( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py:915: FutureWarning: `load_state_dict` is deprecated and will be removed in future versions. Please use `load` instead. checkpoint.load_state_dict( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py:915: FutureWarning: `load_state_dict` is deprecated and will be removed in future versions. Please use `load` instead. checkpoint.load_state_dict( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py:915: FutureWarning: `load_state_dict` is deprecated and will be removed in future versions. Please use `load` instead. checkpoint.load_state_dict( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py:915: FutureWarning: `load_state_dict` is deprecated and will be removed in future versions. Please use `load` instead. checkpoint.load_state_dict( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py:915: FutureWarning: `load_state_dict` is deprecated and will be removed in future versions. Please use `load` instead. checkpoint.load_state_dict( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py:915: FutureWarning: `load_state_dict` is deprecated and will be removed in future versions. Please use `load` instead. checkpoint.load_state_dict( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py:915: FutureWarning: `load_state_dict` is deprecated and will be removed in future versions. Please use `load` instead. checkpoint.load_state_dict( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/planner_helpers.py:406: FutureWarning: Please use DTensor instead and we are deprecating ShardedTensor. device = getattr(value, "device", None) /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py:915: FutureWarning: `load_state_dict` is deprecated and will be removed in future versions. Please use `load` instead. checkpoint.load_state_dict( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/planner_helpers.py:406: FutureWarning: Please use DTensor instead and we are deprecating ShardedTensor. device = getattr(value, "device", None) /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/planner_helpers.py:406: FutureWarning: Please use DTensor instead and we are deprecating ShardedTensor. device = getattr(value, "device", None) /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/planner_helpers.py:406: FutureWarning: Please use DTensor instead and we are deprecating ShardedTensor. 
  device = getattr(value, "device", None)
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/planner_helpers.py:406: FutureWarning: Please use DTensor instead and we are deprecating ShardedTensor.
  device = getattr(value, "device", None)
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py:915: FutureWarning: `load_state_dict` is deprecated and will be removed in future versions. Please use `load` instead.
  checkpoint.load_state_dict(
[the two FutureWarnings above are repeated verbatim by every rank while the distributed checkpoint is being loaded]
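The warnings come from torch.distributed.checkpoint: ShardedTensor-based loading is being deprecated in favor of DTensor, and load_state_dict in favor of load. A minimal, illustrative sketch of the call-site migration the second warning suggests, assuming a plain FileSystemReader checkpoint layout (the wrapper function name is hypothetical, not Megatron's code):

import torch.distributed.checkpoint as dcp
from torch.distributed.checkpoint import FileSystemReader

def load_sharded_state_dict(state_dict, checkpoint_dir):
    # Deprecated form that triggers the FutureWarning seen in the log:
    #   dcp.load_state_dict(state_dict=state_dict, storage_reader=FileSystemReader(checkpoint_dir))
    # Replacement suggested by the warning; it populates state_dict in place.
    dcp.load(state_dict, storage_reader=FileSystemReader(checkpoint_dir))
    return state_dict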
[rank20]: Traceback (most recent call last):
[rank20]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
[rank20]:     pretrain(
[rank20]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain
[rank20]:     model, optimizer, opt_param_scheduler = setup_model_and_optimizer(
[rank20]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer
[rank20]:     args.iteration, args.num_floating_point_operations_so_far = load_checkpoint(
[rank20]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint
[rank20]:     state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint(
[rank20]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint
[rank20]:     return _load_global_dist_base_checkpoint(
[rank20]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint
[rank20]:     state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness)
[rank20]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load
[rank20]:     loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir)
[rank20]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load
[rank20]:     checkpoint.load_state_dict(
[rank20]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
[rank20]:     return arg(*args, **kwargs)
[rank20]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict
[rank20]:     return _load_state_dict(
[rank20]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict
[rank20]:     central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step)
[rank20]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter
[rank20]:     raise result
[rank20]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31])
[rank20]: Traceback (most recent call last): (RANK 0)
[rank20]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter
[rank20]:     local_data = map_fun()
[rank20]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper
[rank20]:     result = func(*args, **kwargs)
[rank20]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step
[rank20]:     local_plan = planner.create_local_plan()
[rank20]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan
[rank20]:     self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors)
[rank20]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes
[rank20]:     raise CheckpointingException(_msg)
[rank20]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight
[rank20 then repeats the same nested traceback and CheckpointingException for (RANK 1), (RANK 2), and the remaining ranks in the group; ranks 3, 14, and 26 emit tracebacks identical to rank 20's]
87, in wrapper [rank26]: result = func(*args, **kwargs) [rank26]: ^^^^^^^^^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank26]: local_plan = planner.create_local_plan() [rank26]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank20]: result = func(*args, **kwargs) [rank20]: ^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank20]: local_plan = planner.create_local_plan() [rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank20]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank20]: raise CheckpointingException(_msg) [rank3]: ^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank3]: local_plan = planner.create_local_plan() [rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank3]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank3]: raise CheckpointingException(_msg) [rank3]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank14]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank14]: raise CheckpointingException(_msg) [rank14]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank14]: Traceback (most recent call last): (RANK 9) [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank14]: local_data = map_fun() [rank14]: ^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank26]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank26]: raise CheckpointingException(_msg) [rank26]: 
megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank26]: Traceback (most recent call last): (RANK 9) [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank26]: local_data = map_fun() [rank26]: ^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank20]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank20]: Traceback (most recent call last): (RANK 10) [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank20]: local_data = map_fun() [rank20]: ^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank20]: result = func(*args, **kwargs) [rank20]: ^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank20]: local_plan = planner.create_local_plan() [rank3]: Traceback (most recent call last): (RANK 9) [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank3]: local_data = map_fun() [rank3]: ^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank3]: result = func(*args, **kwargs) [rank3]: ^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank3]: local_plan = planner.create_local_plan() [rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank14]: result = func(*args, **kwargs) [rank14]: ^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank14]: local_plan = planner.create_local_plan() [rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank14]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank14]: raise CheckpointingException(_msg) [rank26]: result = func(*args, **kwargs) [rank26]: ^^^^^^^^^^^^^^^^^^^^^ [rank26]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank26]: local_plan = planner.create_local_plan() [rank26]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank26]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank26]: raise CheckpointingException(_msg) [rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank3]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank3]: raise CheckpointingException(_msg) [rank3]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank3]: Traceback (most recent call last): (RANK 10) [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank3]: local_data = map_fun() [rank3]: ^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank14]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank14]: Traceback (most recent call last): (RANK 10) [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank14]: local_data = map_fun() [rank14]: ^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank14]: result = func(*args, **kwargs) [rank14]: ^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank14]: local_plan = planner.create_local_plan() [rank26]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank26]: Traceback (most recent call last): (RANK 10) [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank26]: local_data = map_fun() [rank26]: ^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank26]: result = func(*args, **kwargs) [rank26]: 
^^^^^^^^^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank26]: local_plan = planner.create_local_plan() [rank20]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank20]: raise CheckpointingException(_msg) [rank20]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank20]: Traceback (most recent call last): (RANK 11) [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank20]: local_data = map_fun() [rank20]: ^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank3]: result = func(*args, **kwargs) [rank3]: ^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank3]: local_plan = planner.create_local_plan() [rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank3]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank3]: raise CheckpointingException(_msg) [rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank26]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank20]: result = func(*args, **kwargs) [rank20]: ^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank20]: local_plan = planner.create_local_plan() [rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank20]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank20]: raise CheckpointingException(_msg) [rank3]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank14]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank14]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank14]: raise CheckpointingException(_msg) [rank14]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank14]: Traceback (most recent call last): (RANK 11) [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank14]: local_data = map_fun() [rank14]: ^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank26]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank26]: raise CheckpointingException(_msg) [rank26]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank26]: Traceback (most recent call last): (RANK 11) [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank26]: local_data = map_fun() [rank26]: ^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank20]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank20]: Traceback (most recent call last): (RANK 12) [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank20]: local_data = map_fun() [rank20]: ^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank20]: result = func(*args, **kwargs) [rank20]: ^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank20]: local_plan = planner.create_local_plan() [rank3]: Traceback (most recent call last): (RANK 11) [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank3]: local_data = map_fun() [rank3]: ^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank3]: result = func(*args, **kwargs) [rank3]: ^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank3]: local_plan = planner.create_local_plan() 
[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank14]: result = func(*args, **kwargs) [rank14]: ^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank14]: local_plan = planner.create_local_plan() [rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank14]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank14]: raise CheckpointingException(_msg) [rank26]: result = func(*args, **kwargs) [rank26]: ^^^^^^^^^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank26]: local_plan = planner.create_local_plan() [rank26]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank26]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank26]: raise CheckpointingException(_msg) [rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank20]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank20]: raise CheckpointingException(_msg) [rank20]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank20]: Traceback (most recent call last): (RANK 13) [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank20]: local_data = map_fun() [rank3]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank3]: raise CheckpointingException(_msg) [rank3]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank3]: Traceback (most recent call last): (RANK 12) [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank3]: local_data = map_fun() [rank3]: ^^^^^^^^^ [rank3]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank14]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank14]: Traceback (most recent call last): (RANK 12) [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank14]: local_data = map_fun() [rank14]: ^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank14]: result = func(*args, **kwargs) [rank14]: ^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank14]: local_plan = planner.create_local_plan() [rank26]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank26]: Traceback (most recent call last): (RANK 12) [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank26]: local_data = map_fun() [rank26]: ^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank26]: result = func(*args, **kwargs) [rank26]: ^^^^^^^^^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank26]: local_plan = planner.create_local_plan() [rank20]: ^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank3]: result = func(*args, **kwargs) [rank3]: ^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank3]: local_plan = planner.create_local_plan() [rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank3]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank3]: raise CheckpointingException(_msg) [rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank14]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank14]: raise 
CheckpointingException(_msg) [rank14]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank14]: Traceback (most recent call last): (RANK 13) [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank14]: local_data = map_fun() [rank26]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank26]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank26]: raise CheckpointingException(_msg) [rank26]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank26]: Traceback (most recent call last): (RANK 13) [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank26]: local_data = map_fun() [rank20]: result = func(*args, **kwargs) [rank20]: ^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank20]: local_plan = planner.create_local_plan() [rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank20]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank20]: raise CheckpointingException(_msg) [rank3]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank3]: Traceback (most recent call last): (RANK 13) [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank3]: local_data = map_fun() [rank3]: ^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank3]: result = func(*args, **kwargs) [rank3]: ^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank3]: local_plan = planner.create_local_plan() [rank14]: ^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank26]: ^^^^^^^^^ [rank26]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank20]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank20]: Traceback (most recent call last): (RANK 14) [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank20]: local_data = map_fun() [rank20]: ^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank20]: result = func(*args, **kwargs) [rank20]: ^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank20]: local_plan = planner.create_local_plan() [rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank14]: result = func(*args, **kwargs) [rank14]: ^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank14]: local_plan = planner.create_local_plan() [rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank14]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank14]: raise CheckpointingException(_msg) [rank26]: result = func(*args, **kwargs) [rank26]: ^^^^^^^^^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank26]: local_plan = planner.create_local_plan() [rank26]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank26]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank26]: raise CheckpointingException(_msg) [rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank20]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank20]: raise CheckpointingException(_msg) [rank20]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected 
((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank20]: Traceback (most recent call last): (RANK 15) [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank20]: local_data = map_fun() [rank3]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank3]: raise CheckpointingException(_msg) [rank3]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank3]: Traceback (most recent call last): (RANK 14) [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank3]: local_data = map_fun() [rank3]: ^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank14]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank14]: Traceback (most recent call last): (RANK 14) [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank14]: local_data = map_fun() [rank14]: ^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank14]: result = func(*args, **kwargs) [rank14]: ^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank14]: local_plan = planner.create_local_plan() [rank26]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank26]: Traceback (most recent call last): (RANK 14) [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank26]: local_data = map_fun() [rank26]: ^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank26]: result = func(*args, **kwargs) [rank26]: ^^^^^^^^^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank26]: local_plan = planner.create_local_plan() [rank20]: ^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank20]: result = func(*args, **kwargs) [rank20]: ^^^^^^^^^^^^^^^^^^^^^ [rank20]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank20]: local_plan = planner.create_local_plan() [rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank20]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank3]: result = func(*args, **kwargs) [rank3]: ^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank3]: local_plan = planner.create_local_plan() [rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank3]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank3]: raise CheckpointingException(_msg) [rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank14]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank14]: raise CheckpointingException(_msg) [rank14]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank14]: Traceback (most recent call last): (RANK 15) [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank14]: local_data = map_fun() [rank26]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank26]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank26]: raise CheckpointingException(_msg) [rank26]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank26]: Traceback (most recent call last): (RANK 15) [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank26]: local_data = map_fun() [rank20]: raise CheckpointingException(_msg) [rank3]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 
4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank3]: Traceback (most recent call last): (RANK 15) [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank3]: local_data = map_fun() [rank3]: ^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank3]: result = func(*args, **kwargs) [rank3]: ^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank3]: local_plan = planner.create_local_plan() [rank14]: ^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank14]: result = func(*args, **kwargs) [rank14]: ^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank14]: local_plan = planner.create_local_plan() [rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank14]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank26]: ^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank26]: result = func(*args, **kwargs) [rank26]: ^^^^^^^^^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank26]: local_plan = planner.create_local_plan() [rank26]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank26]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank20]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank20]: Traceback (most recent call last): (RANK 16) [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank20]: local_data = map_fun() [rank20]: ^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank20]: result = func(*args, **kwargs) [rank20]: ^^^^^^^^^^^^^^^^^^^^^ [rank20]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank20]: local_plan = planner.create_local_plan() [rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank3]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank3]: raise CheckpointingException(_msg) [rank3]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank3]: Traceback (most recent call last): (RANK 16) [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank3]: local_data = map_fun() [rank14]: raise CheckpointingException(_msg) [rank26]: raise CheckpointingException(_msg) [rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank20]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank20]: raise CheckpointingException(_msg) [rank20]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank20]: Traceback (most recent call last): (RANK 17) [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank20]: local_data = map_fun() [rank3]: ^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank3]: result = func(*args, **kwargs) [rank14]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank14]: Traceback (most recent call last): (RANK 16) [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank14]: local_data = map_fun() [rank14]: ^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank14]: result = func(*args, **kwargs) [rank14]: ^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank14]: local_plan = planner.create_local_plan() [rank26]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded 
(torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight
[rank26]: Traceback (most recent call last): (RANK 16)
[rank26]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter
[rank26]:     local_data = map_fun()
[rank26]:                  ^^^^^^^^^
[rank26]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper
[rank26]:     result = func(*args, **kwargs)
[rank26]:              ^^^^^^^^^^^^^^^^^^^^^
[rank26]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step
[rank26]:     local_plan = planner.create_local_plan()
[rank26]:                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^
[rank26]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan
[rank26]:     self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors)
[rank26]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes
[rank26]:     raise CheckpointingException(_msg)
[rank26]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight
The identical traceback and CheckpointingException are repeated by [rank3], [rank14], [rank20], and [rank26] for every load-plan entry from (RANK 16) onward.
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank26]: result = func(*args, **kwargs) [rank26]: ^^^^^^^^^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank26]: local_plan = planner.create_local_plan() [rank26]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank26]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank20]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank20]: Traceback (most recent call last): (RANK 29) [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank20]: local_data = map_fun() [rank20]: ^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank20]: result = func(*args, **kwargs) [rank20]: ^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank20]: local_plan = planner.create_local_plan() [rank3]: result = func(*args, **kwargs) [rank3]: ^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank3]: local_plan = planner.create_local_plan() [rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank3]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank3]: raise CheckpointingException(_msg) [rank14]: raise CheckpointingException(_msg) [rank26]: raise CheckpointingException(_msg) [rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank20]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank20]: raise CheckpointingException(_msg) [rank20]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank20]: Traceback (most recent call last): (RANK 30) [rank20]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank20]: local_data = map_fun() [rank3]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank3]: Traceback (most recent call last): (RANK 29) [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank3]: local_data = map_fun() [rank3]: ^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank3]: result = func(*args, **kwargs) [rank3]: ^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank3]: local_plan = planner.create_local_plan() [rank14]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank14]: Traceback (most recent call last): (RANK 29) [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank14]: local_data = map_fun() [rank14]: ^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank14]: result = func(*args, **kwargs) [rank14]: ^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank14]: local_plan = planner.create_local_plan() [rank26]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank26]: Traceback (most recent call last): (RANK 29) [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank26]: local_data = map_fun() [rank26]: ^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank26]: result = func(*args, **kwargs) [rank26]: ^^^^^^^^^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank26]: local_plan = planner.create_local_plan() [rank20]: ^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank20]: result = func(*args, **kwargs) [rank20]: ^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step 
[rank20]: local_plan = planner.create_local_plan() [rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank20]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank14]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank14]: raise CheckpointingException(_msg) [rank14]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank14]: Traceback (most recent call last): (RANK 30) [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank14]: local_data = map_fun() [rank26]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank26]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank26]: raise CheckpointingException(_msg) [rank26]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank26]: Traceback (most recent call last): (RANK 30) [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank26]: local_data = map_fun() [rank20]: raise CheckpointingException(_msg) [rank20]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank20]: Traceback (most recent call last): (RANK 31) [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank20]: local_data = map_fun() [rank20]: ^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank20]: result = func(*args, **kwargs) [rank20]: ^^^^^^^^^^^^^^^^^^^^^ [rank20]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank3]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank3]: raise CheckpointingException(_msg) [rank3]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank3]: Traceback (most recent call last): (RANK 30) [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank3]: local_data = map_fun() [rank3]: ^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank14]: ^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank14]: result = func(*args, **kwargs) [rank14]: ^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank14]: local_plan = planner.create_local_plan() [rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank14]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank26]: ^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank26]: result = func(*args, **kwargs) [rank26]: ^^^^^^^^^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank26]: local_plan = planner.create_local_plan() [rank26]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank26]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank20]: local_plan = planner.create_local_plan() [rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank20]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank20]: raise CheckpointingException(_msg) 
[rank16]: Traceback (most recent call last):
[rank16]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
[rank16]:     pretrain(
[rank16]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain
[rank16]:     model, optimizer, opt_param_scheduler = setup_model_and_optimizer(
[rank16]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer
[rank16]:     args.iteration, args.num_floating_point_operations_so_far = load_checkpoint(
[rank16]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint
[rank16]:     state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint(
[rank16]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint
[rank16]:     return _load_global_dist_base_checkpoint(
[rank16]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint
[rank16]:     state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness)
[rank16]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load
[rank16]:     loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir)
[rank16]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load
[rank16]:     checkpoint.load_state_dict(
[rank16]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
[rank16]:     return arg(*args, **kwargs)
[rank16]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict
[rank16]:     return _load_state_dict(
[rank16]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict
[rank16]:     central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step)
[rank16]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter
[rank16]:     raise result
[rank16]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31])
[rank16]: Traceback (most recent call last): (RANK 0)
[rank16]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter
[rank16]:     local_data = map_fun()
[rank16]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper
[rank16]:     result = func(*args, **kwargs)
[rank16]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step
[rank16]:     local_plan = planner.create_local_plan()
[rank16]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan
[rank16]:     self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors)
[rank16]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes
[rank16]:     raise CheckpointingException(_msg)
[rank16]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight
[rank16]: [the same embedded traceback repeats for (RANK 1) onward; ranks 4, 9, and 29 print identical top-level tracebacks and per-rank dumps]
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank9]: local_data = map_fun() [rank9]: ^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank9]: result = func(*args, **kwargs) [rank9]: ^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank9]: local_plan = planner.create_local_plan() [rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank29]: Traceback (most recent call last): (RANK 5) [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank29]: local_data = map_fun() [rank29]: ^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank29]: result = func(*args, **kwargs) [rank29]: ^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank29]: local_plan = planner.create_local_plan() [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank16]: result = func(*args, **kwargs) [rank16]: ^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank16]: local_plan = planner.create_local_plan() [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank16]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank16]: raise CheckpointingException(_msg) [rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank4]: raise CheckpointingException(_msg) [rank4]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank4]: Traceback (most recent call last): (RANK 5) [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank4]: 
local_data = map_fun() [rank9]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank9]: raise CheckpointingException(_msg) [rank9]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank9]: Traceback (most recent call last): (RANK 6) [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank9]: local_data = map_fun() [rank9]: ^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank29]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: raise CheckpointingException(_msg) [rank29]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank29]: Traceback (most recent call last): (RANK 6) [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank29]: local_data = map_fun() [rank29]: ^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank16]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank16]: Traceback (most recent call last): (RANK 7) [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank16]: local_data = map_fun() [rank16]: ^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank16]: result = func(*args, **kwargs) [rank16]: ^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank16]: local_plan = planner.create_local_plan() [rank4]: ^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank4]: result = func(*args, **kwargs) [rank4]: ^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank4]: local_plan = planner.create_local_plan() [rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank4]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_[rank2]: Traceback (most recent call last): [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank2]: pretrain( [rank9]: result = func(*args, **kwargs) [rank9]: ^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank9]: local_plan = planner.create_local_plan() [rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank9]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank9]: raise CheckpointingException(_msg) [rank29]: result = func(*args, **kwargs) [rank29]: ^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank29]: local_plan = planner.create_local_plan() [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank29]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: raise CheckpointingException(_msg) [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain [rank2]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer [rank2]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( [rank2]: ^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint [rank2]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( [rank2]: ^^^^^^^^^^^^^^^^^^^^^^ [rank9]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank9]: Traceback (most recent call last): (RANK 7) [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank9]: local_data = map_fun() [rank9]: ^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank9]: result = func(*args, **kwargs) [rank9]: ^^^^^^^^^^^^^^^^^^^^^ [rank9]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank9]: local_plan = planner.create_local_plan() [rank29]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank29]: Traceback (most recent call last): (RANK 7) [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank29]: local_data = map_fun() [rank29]: ^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank29]: result = func(*args, **kwargs) [rank29]: ^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank29]: local_plan = planner.create_local_plan() [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank16]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank16]: raise CheckpointingException(_msg) [rank16]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank16]: Traceback (most recent call last): (RANK 8) [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank16]: local_data = map_fun() [rank16]: ^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint [rank2]: return _load_global_dist_base_checkpoint( [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint [rank2]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load [rank2]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line[rank19]: Traceback (most recent call last): [rank19]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank19]: pretrain( [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain [rank19]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer [rank19]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( [rank19]: ^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load [rank2]: checkpoint.load_state_dict( [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper [rank2]: return arg(*args, **kwargs) [rank2]: ^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict [rank2]: return _load_state_dict( [rank2]: ^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict [rank2]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) [rank9]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank9]: raise CheckpointingException(_msg) [rank9]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank9]: Traceback (most recent call last): (RANK 8) [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank9]: local_data = map_fun() [rank9]: ^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank29]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: raise CheckpointingException(_msg) [rank29]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank29]: Traceback (most recent call last): (RANK 8) [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank29]: local_data = map_fun() [rank29]: ^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in 
load_checkpoint [rank19]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( [rank19]: ^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint [rank19]: return _load_global_dist_base_checkpoint( [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint [rank19]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter [rank2]: raise result [rank2]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]) [rank2]: Traceback (most recent call last): (RANK 0) [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank2]: local_data = map_fun() [rank2]: ^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank9]: result = func(*args, **kwargs) [rank9]: ^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank9]: local_plan = planner.create_local_plan() [rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank9]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank9]: raise CheckpointingException(_msg) [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line[rank24]: Traceback (most recent call last): [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank24]: pretrain( [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain [rank24]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer [rank24]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( [rank24]: ^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load [rank19]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) [rank19]: 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load [rank19]: checkpoint.load_state_dict( [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper [rank19]: return arg(*args, **kwargs) [rank19]: ^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict [rank19]: return _load_state_dict( [rank19]: ^^^^^^^^^^^^^^^^^ [rank2]: result = func(*args, **kwargs) [rank2]: ^^^^^^^^^^^^^^^^^^^^^ [rank9]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank9]: Traceback (most recent call last): (RANK 9) [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank9]: local_data = map_fun() [rank9]: ^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank9]: result = func(*args, **kwargs) [rank9]: ^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank9]: local_plan = planner.create_local_plan() [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint [rank24]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( [rank24]: ^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint [rank24]: return _load_global_dist_base_checkpoint( [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint [rank24]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict [rank19]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter [rank19]: raise result [rank19]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]) [rank19]: Traceback (most recent call last): (RANK 0) [rank2]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank2]: local_plan = planner.create_local_plan() [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank2]: raise CheckpointingException(_msg) [rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank2]: Traceback (most recent call last): (RANK 1) [rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank9]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank9]: raise CheckpointingException(_msg) [rank9]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank9]: Traceback (most recent call last): (RANK 10) [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank9]: local_data = map_fun() [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load [rank24]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load [rank24]: checkpoint.load_state_dict( [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper [rank24]: return arg(*args, **kwargs) [rank24]: ^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict [rank24]: return _load_state_dict( [rank24]: ^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank19]: local_data = map_fun() [rank19]: ^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank19]: re 87, in wrapper [rank16]: result = func(*args, **kwargs) [rank16]: ^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step 
[rank16]: local_plan = planner.create_local_plan() [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank2]: local_data = map_fun() [rank2]: ^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank2]: result = func(*args, **kwargs) [rank2]: ^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank2]: local_plan = planner.create_local_plan() [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank9]: ^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank9]: result = func(*args, **kwargs) [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict [rank24]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter [rank24]: raise result [rank24]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]) [rank24]: Traceback (most recent call last): (RANK 0) [rank16]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank16]: raise CheckpointingException(_msg) [rank16]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank16]: Traceback (most recent call last): (RANK 9) [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank16]: local_data = map_fun() [rank16]: ^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank2]: raise CheckpointingException(_msg) [rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 
4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank2]: Traceback (most recent call last): (RANK 2) [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank2]: local_data = map_fun() [rank2]: ^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank9]: ^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank9]: local_plan = planner.create_local_plan() [rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank9]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank9]: raise CheckpointingException(_msg) [rank9]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank24]: local_data = map_fun() [rank24]: ^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: re[rank30]: Traceback (most recent call last): [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank30]: pretrain( [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain [rank30]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: result = func(*args, **kwargs) [rank16]: ^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank16]: local_plan = planner.create_local_plan() [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank16]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank16]: raise CheckpointingException(_msg) [rank2]: result = func(*args, **kwargs) [rank2]: ^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank2]: local_plan = planner.create_local_plan() [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank2]: raise CheckpointingException(_msg) [rank9]: Traceback (most recent call last): (RANK 11) [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/li[rank12]: Traceback (most recent call last): [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank12]: pretrain( [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain [rank12]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer [rank12]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( [rank12]: ^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer [rank30]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( [rank30]: ^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint [rank30]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( [rank30]: ^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint [rank30]: return _load_global_dist_base_checkpoint( [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank16]: Traceback (most recent call last): (RANK 10) [rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank2]: Traceback (most recent call last): (RANKtensors) [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint [rank12]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( [rank12]: ^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint [rank12]: return _load_global_dist_base_checkpoint( [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint [rank12]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint 
[rank30]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load [rank30]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load [rank30]: checkpoint.load_state_dict( [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank16]: local_data = map_fun() [rank16]: ^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank16]: result = func(*args, **kwargs) [rank16]: ^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank16]: local_plan = planner.create_local_plan() [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank4]: raise CheckpointingException(_msg) [rank4]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank4]: Traceback (most recent call last): (RANK 6) [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank4]: local_data = map_fun() [rank4]: ^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank4]: result = func(*args, **kwargs) [rank4]: ^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load [rank12]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load [rank12]: checkpoint.load_state_dict( [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper [rank12]: return arg(*args, **kwargs) [rank12]: ^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict [rank12]: return _load_state_dict( [rank12]: ^^^^^^^^^^^^^^^^^ [rank30]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper [rank30]: return arg(*args, **kwargs) [rank30]: ^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict [rank30]: return _load_state_dict( [rank30]: ^^^^^^^^^^^^^^^^^ [rank16]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank16]: raise CheckpointingException(_msg) [rank16]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16[rank23]: Traceback (most recent call last): [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank23]: pretrain( [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain [rank23]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank4]: local_plan = planner.create_local_plan() [rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank4]: raise CheckpointingException(_msg) [rank4]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank4]: Traceback (most recent call last): (RANK 7) [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict [rank12]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter [rank12]: raise result [rank12]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]) [rank12]: Traceback (most recent call last): (RANK 0) [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict [rank30]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter [rank30]: raise result [rank30]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]) [rank30]: Traceback (most recent call last): (RANK 0) [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer [rank23]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( [rank23]: ^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint [rank23]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( [rank23]: ^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint [rank23]: return _load_global_dist_base_checkpoint( [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank4]: local_data = map_fun() [rank4]: ^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank4]: result = func(*args, **kwargs) [rank4]: ^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank4]: local_plan = planner.create_local_plan() [rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank12]: local_data = map_fun() [rank12]: ^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank12]: re[rank11]: Traceback (most recent call last): [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank11]: pretrain( [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain [rank11]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank30]: local_data = map_fun() [rank30]: ^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank30]: re 87, in wrapper [rank29]: result = func(*args, **kwargs) [rank29]: ^^^^^^^^^^^^^^^^^^^^^ [rank29]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank29]: local_plan = planner.create_local_plan() [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint [rank23]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load [rank23]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load [rank23]: checkpoint.load_state_dict( [rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank4]: raise CheckpointingException(_msg) [rank4]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank4]: Traceback (most recent call last): (RANK 8) [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank4]: local_data = map_fun() [rank4]: ^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer [rank11]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( [rank11]: ^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint [rank11]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( [rank11]: ^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint [rank11]: return _load_global_dist_base_checkpoint( [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: raise CheckpointingException(_msg) [rank29]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank29]: Traceback (most recent call last): (RANK 9) 
[rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank29]: local_data = map_fun() [rank29]: ^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper [rank23]: return arg(*args, **kwargs) [rank23]: ^^^^^^^^^^^^^^^^^^^^ [rank4]: result = func(*args, **kwargs) [rank4]: ^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint [rank11]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load [rank11]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load [rank11]: checkpoint.load_state_dict( [rank29]: result = func(*args, **kwargs) [rank29]: ^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank29]: local_plan = planner.create_local_plan() [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank29]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: raise CheckpointingException(_msg) [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict [rank23]: return _load_state_dict( [rank23]: ^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict [rank23]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter [rank23]: raise result [rank4]: File "/mnt/weka/home/hao.zhang/conda/minicond[rank0]: Traceback (most recent call last): [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank0]: pretrain( [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain [rank0]: model, optimizer, opt_param_scheduler = 
setup_model_and_optimizer( [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer [rank0]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( [rank0]: ^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper [rank11]: return arg(*args, **kwargs) [rank11]: ^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict [rank11]: return _load_state_dict( [rank11]: ^^^^^^^^^^^^^^^^^ [rank29]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank29]: Traceback (most recent call last): (RANK 10) [rank23]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]) [rank23]: Traceback (most recent call last): (RANK 0) [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank23]: local_data = map_fun() [rank23]: ^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank23]: re[rank17]: Traceback (most recent call last): [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank17]: pretrain( [rank0]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( [rank0]: ^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint [rank0]: return _load_global_dist_base_checkpoint( [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint [rank0]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict [rank11]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter [rank11]: raise result [rank11]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException 
ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]) [rank11]: Traceback (most recent call last): (RANK 0) [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank29]: local_data = map_fun() [rank29]: ^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank29]: result = func(*args, **kwargs) [rank29]: ^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank29]: local_plan = planner.create_local_plan() [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain [rank17]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( [rank17]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer [rank17]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( [rank17]: ^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint [rank17]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( [rank17]: ^^^^^^^^^^^^^^^^^^^^^^ [rank0]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load [rank0]: checkpoint.load_state_dict( [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper [rank0]: return arg(*args, **kwargs) [rank0]: ^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict [rank0]: return _load_state_dict( [rank0]: ^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: local_data = map_fun() [rank11]: ^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank11]: re[rank15]: Traceback (most recent call last): [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank15]: pretrain( [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain [rank15]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank29]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: raise CheckpointingException(_msg) [rank29]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16[rank27]: Traceback (most recent call last): [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank27]: pretrain( [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain [rank27]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( [rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint [rank17]: return _load_global_dist_base_checkpoint( [rank17]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint [rank17]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) [rank17]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load [rank17]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) [rank17]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict [rank0]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter [rank0]: raise result [rank0]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]) [rank0]: Traceback (most recent call last): (RANK 0) [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer [rank15]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( [rank15]: ^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint [rank15]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( [rank15]: ^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint [rank15]: return _load_global_dist_base_checkpoint( [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer [rank27]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( [rank27]: ^^^^^^^^^^^^^^^^ [rank27]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint [rank27]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( [rank27]: ^^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint [rank27]: return _load_global_dist_base_checkpoint( [rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load [rank17]: checkpoint.load_state_dict( [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper [rank17]: return arg(*args, **kwargs) [rank17]: ^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank0]: local_data = map_fun() [rank0]: ^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint [rank15]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load [rank15]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load [rank15]: checkpoint.load_state_dict( [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint [rank27]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) [rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load [rank27]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) [rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load [rank27]: checkpoint.load_state_dict( [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict [rank17]: return _load_state_dict( [rank17]: ^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict [rank17]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) [rank17]: 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter [rank17]: raise result [rank0]: result = func(*args, **kwargs) [rank0]: [rank5]: Traceback (most recent call last): [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank5]: pretrain( [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain [rank5]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer [rank5]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( [rank5]: ^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper [rank27]: return arg(*args, **kwargs) [rank27]: ^^^^^^^^^^^^^^^^^^^^ [rank17]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]) [rank17]: Traceback (most recent call last): (RANK 0) [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank17]: local_data = map_fun() [rank17]: ^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank17]: re[rank21]: Traceback (most recent call last): [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank21]: pretrain( [rank5]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( [rank5]: ^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint [rank5]: return _load_global_dist_base_checkpoint( [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint [rank5]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict [rank27]: return _load_state_dict( [rank27]: ^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict [rank27]: central_plan: LoadPlan = distW.reduce_scatter("plan", 
local_step, global_step) [rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter [rank27]: raise result [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain [rank21]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer [rank21]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( [rank21]: ^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint [rank21]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( [rank21]: ^^^^^^^^^^^^^^^^^^^^^^ [rank5]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load [rank5]: checkpoint.load_state_dict( [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper [rank5]: return arg(*args, **kwargs) [rank5]: ^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict [rank5]: return _load_state_dict( [rank5]: ^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper [rank15]: return arg(*args, **kwargs) [rank15]: ^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict [rank15]: return _load_state_dict( [rank15]: ^^^^^^^^^^^^^^^^^ [rank27]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]) [rank27]: Traceback (most recent call last): (RANK 0) [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank27]: local_data = map_fun() [rank27]: ^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank27]: re[rank28]: Traceback (most recent call last): [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank28]: pretrain( [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint [rank21]: return _load_global_dist_base_checkpoint( [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint [rank21]: 
state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load [rank21]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict [rank5]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter [rank5]: raise result [rank5]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]) [rank5]: Traceback (most recent call last): (RANK 0) [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict [rank15]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter [rank15]: raise result [rank15]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]) [rank15]: Traceback (most recent call last): (RANK 0) [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain [rank28]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer [rank28]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( [rank28]: ^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint [rank28]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( [rank28]: ^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load [rank21]: checkpoint.load_state_dict( [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper [rank21]: return arg(*args, **kwargs) [rank21]: ^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank5]: local_data = map_fun() [rank5]: ^^^^^^^^^ [rank5]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank15]: local_data = map_fun() [rank15]: ^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank15]: re[rank13]: Traceback (most recent call last): [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank13]: pretrain( [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain [rank13]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( [rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint [rank28]: return _load_global_dist_base_checkpoint( [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint [rank28]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load [rank28]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict [rank21]: return _load_state_dict( [rank21]: ^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict [rank21]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter [rank21]: raise result [rank5]: result = func(*args, **kwargs) [rank5]: [rank1]: Traceback (most recent call last): [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank1]: pretrain( [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain [rank1]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer [rank1]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( [rank1]: ^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint [rank13]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer [rank13]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( [rank13]: ^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint [rank13]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( [rank13]: ^^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint [rank13]: return _load_global_dist_base_checkpoint( [rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load [rank28]: checkpoint.load_state_dict( [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper [rank28]: return arg(*args, **kwargs) [rank28]: ^^^^^^^^^^^^^^^^^^^^ [rank21]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]) [rank21]: Traceback (most recent call last): (RANK 0) [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank21]: local_data = map_fun() [rank21]: ^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank21]: re[rank18]: Traceback (most recent call last): [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank18]: pretrain( [rank1]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( [rank1]: ^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint [rank1]: return _load_global_dist_base_checkpoint( [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint [rank1]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint [rank13]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) [rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load [rank13]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) [rank13]: 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load [rank13]: checkpoint.load_state_dict( [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict [rank28]: return _load_state_dict( [rank28]: ^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict [rank28]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter [rank28]: raise result [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain [rank18]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer [rank18]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( [rank18]: ^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint [rank18]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( [rank18]: ^^^^^^^^^^^^^^^^^^^^^^ [rank1]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load [rank1]: checkpoint.load_state_dict( [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper [rank1]: return arg(*args, **kwargs) [rank1]: ^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict [rank1]: return _load_state_dict( [rank1]: ^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper [rank13]: return arg(*args, **kwargs) [rank13]: ^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict [rank13]: return _load_state_dict( [rank13]: ^^^^^^^^^^^^^^^^^ [rank28]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]) [rank28]: Traceback (most recent call last): (RANK 0) [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank28]: local_data = map_fun() [rank28]: ^^^^^^^^^ [rank28]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank28]: re[rank25]: Traceback (most recent call last): [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank25]: pretrain( [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint [rank18]: return _load_global_dist_base_checkpoint( [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint [rank18]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load [rank18]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict [rank1]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter [rank1]: raise result [rank1]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]) [rank1]: Traceback (most recent call last): (RANK 0) [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict [rank13]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) [rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter [rank13]: raise result [rank13]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]) [rank13]: Traceback (most recent call last): (RANK 0) [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain [rank25]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer [rank25]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( [rank25]: ^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint [rank25]: state_dict, 
checkpoint_name, release, ckpt_type = _load_base_checkpoint( [rank25]: ^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load [rank18]: checkpoint.load_state_dict( [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper [rank18]: return arg(*args, **kwargs) [rank18]: ^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank1]: local_data = map_fun() [rank1]: ^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank13]: local_data = map_fun() [rank13]: ^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank13]: reb/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank9]: local_data = map_fun() [rank9]: ^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank9]: result = func(*args, **kwargs) [rank9]: ^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint [rank25]: return _load_global_dist_base_checkpoint( [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint [rank25]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load [rank25]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict [rank18]: return _load_state_dict( [rank18]: ^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict [rank18]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter [rank18]: raise result [rank1]: result = func(*args, **kwargs) [rank1]: [rank6]: Traceback (most recent call last): [rank6]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank6]: pretrain( [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain [rank6]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer [rank6]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( [rank6]: ^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank9]: local_plan = planner.create_local_plan() [rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank9]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank9]: raise CheckpointingException(_msg) [rank9]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank9]: Traceback (most recent call last): (RANK 12) [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load [rank25]: checkpoint.load_state_dict( [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper [rank25]: return arg(*args, **kwargs) [rank25]: ^^^^^^^^^^^^^^^^^^^^ [rank18]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]) [rank18]: Traceback (most recent call last): (RANK 0) [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank18]: local_data = map_fun() [rank18]: ^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank18]: re[rank22]: Traceback (most recent call last): [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank22]: pretrain( [rank6]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( [rank6]: ^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint [rank6]: return _load_global_dist_base_checkpoint( [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint [rank6]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, 
load_strategy, strict=args.dist_ckpt_strictness) [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank9]: local_data = map_fun() [rank9]: ^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank9]: result = func(*args, **kwargs) [rank9]: ^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank9]: local_plan = planner.create_local_plan() [rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict [rank25]: return _load_state_dict( [rank25]: ^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict [rank25]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter [rank25]: raise result [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain [rank22]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( [rank22]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer [rank22]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( [rank22]: ^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint [rank22]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( [rank22]: ^^^^^^^^^^^^^^^^^^^^^^ [rank6]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load [rank6]: checkpoint.load_state_dict( [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper [rank6]: return arg(*args, **kwargs) [rank6]: ^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict [rank6]: return _load_state_dict( [rank6]: ^^^^^^^^^^^^^^^^^ [rank9]: 
self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors)
[rank9]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes
[rank9]:     raise CheckpointingException(_msg)
[rank9]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight
[rank7]: Traceback (most recent call last):
[rank7]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
[rank7]:     pretrain(
[rank7]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain
[rank7]:     model, optimizer, opt_param_scheduler = setup_model_and_optimizer(
[rank7]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer
[rank7]:     args.iteration, args.num_floating_point_operations_so_far = load_checkpoint(
[rank7]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint
[rank7]:     state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint(
[rank7]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint
[rank7]:     return _load_global_dist_base_checkpoint(
[rank7]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint
[rank7]:     state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness)
[rank7]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load
[rank7]:     loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir)
[rank7]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load
[rank7]:     checkpoint.load_state_dict(
[rank7]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
[rank7]:     return arg(*args, **kwargs)
[rank7]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict
[rank7]:     return _load_state_dict(
[rank7]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict
[rank7]:     central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step)
[rank7]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter
[rank7]:     raise result
[rank7]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31])
[rank7]: Traceback (most recent call last): (RANK 0)
[rank7]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter
[rank7]:     local_data = map_fun()
[rank7]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper
[rank7]:     result = func(*args, **kwargs)
[rank7]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step
[rank7]:     local_plan = planner.create_local_plan()
[rank7]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan
[rank7]:     self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors)
[rank7]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes
[rank7]:     raise CheckpointingException(_msg)
[rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight
[rank0-rank31]: (identical nested tracebacks repeated for every rank; all 32 ranks fail with megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight)
and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank5]: Traceback (most recent call last): (RANK 1) [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank5]: local_data = map_fun() [rank5]: ^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank5]: result = func(*args, **kwargs) [rank5]: ^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank11]: result = func(*args, **kwargs) [rank11]: ^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank11]: local_plan = planner.create_local_plan() [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank11]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank11]: raise CheckpointingException(_msg) [rank30]: local_data = map_fun() [rank30]: ^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank30]: result = func(*args, **kwargs) [rank30]: ^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank30]: local_plan = planner.create_local_plan() [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py"ing.position_embeddings.weight [rank27]: Traceback (most recent call last): (RANK 3) [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank23]: ^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank23]: result = func(*args, **kwargs) [rank23]: ^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank23]: local_plan = planner.create_local_plan() [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank23]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank23]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank5]: local_plan = planner.create_local_plan() [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank5]: raise CheckpointingException(_msg) [rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank5]: Traceback (most recent call last): (RANK 2) [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank27]: local_data = map_fun() [rank27]: ^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank27]: result = func(*args, **kwargs) [rank27]: ^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank27]: local_plan = planner.create_local_plan() [rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank27]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank23]: raise CheckpointingException(_msg) [rank23]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank23]: Traceback (most recent call last): (RANK 4) [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank23]: local_data = map_fun() [rank23]: ^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank23]: result = func(*args, **kwargs) [rank23]: ^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank5]: local_data = map_fun() [rank5]: ^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank5]: result = func(*args, **kwargs) [rank5]: ^^^^^^^^^^^^^^^^^^^^^ [rank5]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank5]: local_plan = planner.create_local_plan() [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: Traceback (most recent call last): (RANK 2) [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: local_data = map_fun() [rank11]: ^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank11]: result = func(*args, **kwargs) [rank11]: ^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank11]: local_plan = planner.create_local_plan() [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank27]: raise CheckpointingException(_msg) [rank27]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank27]: Traceback (most recent call last): (RANK 4) [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank27]: local_data = map_fun() [rank27]: ^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank27]: result = func(*args, **kwargs) [rank27]: ^^^^^^^^^^^^^^^^^^^^^ [rank23]: local_plan = planner.create_local_plan() [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank23]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank23]: raise CheckpointingException(_msg) [rank23]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank23]: Traceback (most recent call last): (RANK 5) [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank5]: raise 
CheckpointingException(_msg) [rank11]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank11]: raise CheckpointingException(_msg) [rank11]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedd, line 605, in create_local_plan [rank15]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank15]: raise CheckpointingException(_msg) [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank23]: local_data = map_fun() [rank23]: ^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank23]: result = func(*args, **kwargs) [rank23]: ^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank23]: local_plan = planner.create_local_plan() [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py"sult = func(*args, **kwargs) [rank17]: ^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank5]: Traceback (most recent call last): (RANK 3) [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank5]: local_data = map_fun() [rank5]: ^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank5]: result = func(*args, **kwargs) [rank5]: ^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank5]: local_plan = planner.create_local_plan() [rank15]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank15]: Traceback (most recent call last): (RANK 6) [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank15]: local_data = map_fun() [rank15]: ^^^^^^^^^ [rank15]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank15]: result = func(*args, **kwargs) [rank15]: ^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank15]: local_plan = planner.create_local_plan() [rank27]: local_plan = planner.create_local_plan() [rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank27]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank27]: raise CheckpointingException(_msg) [rank27]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank27]: Traceback (most recent call last): (RANK 5) [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank17]: local_plan = planner.create_local_plan() [rank17]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank17]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank17]: raise CheckpointingException(_msg) [rank17]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank17]: Traceback (most recent call last): (RANK 1) [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank5]: raise CheckpointingException(_msg) [rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank5]: Traceback (most recent call last): (RANK 4) [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank5]: local_data = map_fun() [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 
605, in create_local_plan [rank15]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank15]: raise CheckpointingException(_msg) [rank15]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank15]: Traceback (most recent call last): (RANK 7) [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank15]: local_data = map_fun() [rank27]: local_data = map_fun() [rank27]: ^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank27]: result = func(*args, **kwargs) [rank27]: ^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank27]: local_plan = planner.create_local_plan() [rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py"ing.position_embeddings.weight [rank25]: Traceback (most recent call last): (RANK 3) [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank17]: local_data = map_fun() [rank17]: ^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank17]: result = func(*args, **kwargs) [rank17]: ^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank17]: local_plan = planner.create_local_plan() [rank17]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank17]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank5]: ^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank5]: result = func(*args, **kwargs) [rank5]: ^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank5]: local_plan = planner.create_local_plan() [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank15]: ^^^^^^^^^ [rank15]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank25]: local_data = map_fun() [rank25]: ^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank25]: result = func(*args, **kwargs) [rank25]: ^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank25]: local_plan = planner.create_local_plan() [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank25]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank5]: raise CheckpointingException(_msg) [rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank5]: Traceback (most recent call last): (RANK 5) [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank5]: local_data = map_fun() [rank5]: ^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank5]: result = func(*args, **kwargs) [rank5]: ^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank15]: result = func(*args, **kwargs) [rank15]: ^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank15]: local_plan = planner.create_local_plan() [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank15]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank15]: raise CheckpointingException(_msg) [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank25]: raise CheckpointingException(_msg) [rank25]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank25]: Traceback (most recent call last): (RANK 4) [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank25]: local_data = 
map_fun() [rank25]: ^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank25]: result = func(*args, **kwargs) [rank25]: ^^^^^^^^^^^^^^^^^^^^^ [rank17]: raise CheckpointingException(_msg) [rank17]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank17]: Traceback (most recent call last): (RANK 2) [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank17]: local_data = map_fun() [rank17]: ^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank17]: result = func(*args, **kwargs) [rank17]: ^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank5]: local_plan = planner.create_local_plan() [rank15]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank15]: Traceback (most recent call last): (RANK 8) [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank15]: local_data = map_fun() [rank15]: ^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", lineing.position_embeddings.weight [rank12]: Traceback (most recent call last): (RANK 3) [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank12]: local_data = map_fun() [rank12]: ^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank17]: local_plan = planner.create_local_plan() [rank17]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank17]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank17]: raise CheckpointingException(_msg) [rank17]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embeddsult = func(*args, **kwargs) [rank18]: ^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_ ^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank1]: local_plan = planner.create_local_plan() [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank1]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank12]: result = func(*args, **kwargs) [rank12]: ^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank12]: local_plan = planner.create_local_plan() [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank12]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank12]: raise CheckpointingException(_msg) [rank25]: local_plan = planner.create_local_plan() [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank25]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank25]: raise CheckpointingException(_msg) [rank25]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank25]: Traceback (most recent call last): (RANK 5) [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank18]: local_plan = planner.create_local_plan() [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank18]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank18]: raise CheckpointingException(_msg) [rank18]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank18]: Traceback (most recent call last): (RANK 1) [rank18]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank1]: raise CheckpointingException(_msg) [rank1]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank1]: Traceback (most recent call last): (RANK 1) [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank1]: local_data = map_fun() [rank1]: ^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank1]: result = func(*args, **kwargs) [rank1]: ^^^^^^^^^^^^^^^^^^^^^ [rank12]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank12]: Traceback (most recent call last): (RANK 4) [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank12]: local_data = map_fun() [rank12]: ^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank12]: result = func(*args, **kwargs) [rank12]: ^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank12]: local_plan = planner.create_local_plan() [rank25]: local_data = map_fun() [rank25]: ^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank25]: result = func(*args, **kwargs) [rank25]: ^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank25]: local_plan = planner.create_local_plan() [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank30]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank18]: local_data = map_fun() [rank18]: ^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank18]: result = func(*args, **kwargs) [rank18]: ^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank18]: local_plan = planner.create_local_plan() [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank18]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank1]: local_plan = planner.create_local_plan() [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank1]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank1]: raise CheckpointingException(_msg) [rank1]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank1]: Traceback (most recent call last): (RANK 2) [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank30]: raise CheckpointingException(_msg) [rank30]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank30]: Traceback (most recent call last): (RANK 6) [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank30]: local_data = map_fun() [rank30]: ^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank30]: result = func(*args, **kwargs) [rank30]: ^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank1]: local_data = map_fun() [rank1]: ^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank1]: result = func(*args, **kwargs) [rank1]: ^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank1]: local_plan = planner.create_local_plan() [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank12]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank12]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank12]: raise CheckpointingException(_msg) [rank12]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank12]: Traceback (most recent call last): (RANK 5) [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank12]: local_data = map_fun() [rank12]: ^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank30]: local_plan = planner.create_local_plan() [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank30]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank30]: raise CheckpointingException(_msg) [rank30]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank18]: raise CheckpointingException(_msg) [rank18]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank18]: Traceback (most recent call last): (RANK 2) [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank18]: local_data = map_fun() [rank18]: ^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank18]: result = func(*args, **kwargs) [rank18]: ^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank1]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank12]: result = func(*args, **kwargs) [rank12]: ^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank12]: local_plan = planner.create_local_plan() [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py" 87, in wrapper [rank15]: result = func(*args, **kwargs) [rank15]: 
^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank30]: Traceback (most recent call last): (RANK 7) [rank18]: local_plan = planner.create_local_plan() [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank18]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank18]: raise CheckpointingException(_msg) [rank18]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank19]: Traceback (most recent call last): (RANK 3) [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank1]: raise CheckpointingException(_msg) [rank1]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank1]: Traceback (most recent call last): (RANK ^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank6]: local_plan = planner.create_local_plan() [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank6]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank15]: local_plan = planner.create_local_plan() [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank15]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank15]: raise CheckpointingException(_msg) [rank15]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank15]: Traceback (most recent call last): (RANK 9) [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank30]: local_data = map_fun() [rank30]: ^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank30]: result = func(*args, **kwargs) [rank30]: ^^^^^^^^^^^^^^^^^^^^^ [rank30]: 
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank30]: local_plan = planner.create_local_plan() [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank19]: local_data = map_fun() [rank19]: ^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank19]: result = func(*args, **kwargs) [rank19]: ^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank19]: local_plan = planner.create_local_plan() [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank19]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank6]: raise CheckpointingException(_msg) [rank6]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank6]: Traceback (most recent call last): (RANK 1) [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank6]: local_data = map_fun() [rank6]: ^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank6]: result = func(*args, **kwargs) [rank6]: ^^^^^^^^^^^^^^^^^^^^^ [rank15]: local_data = map_fun() [rank15]: ^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank15]: result = func(*args, **kwargs) [rank15]: ^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank15]: local_plan = planner.create_local_plan() [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank15]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank30]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank30]: raise CheckpointingException(_msg) [rank30]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank30]: Traceback (most recent call last): (RANK 8) [rank30]: 
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank30]: local_data = map_fun() [rank30]: ^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line, line 605, in create_local_plan [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank19]: raise CheckpointingException(_msg) [rank19]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank19]: Traceback (most recent call last): (RANK 4) [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank19]: local_data = map_fun() [rank19]: ^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank19]: result = func(*args, **kwargs) [rank19]: ^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank6]: local_plan = planner.create_local_plan() [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank6]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank6]: raise CheckpointingException(_msg) [rank6]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank6]: Traceback (most recent call last): (RANK 2) [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank15]: raise CheckpointingException(_msg) [rank27]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank27]: raise CheckpointingException(_msg) [rank27]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank27]: Traceback (most recent call last): (RANK 6) [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank27]: local_data = map_fun() [rank27]: ^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank19]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank6]: local_data = map_fun() [rank6]: ^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank6]: result = func(*args, **kwargs) [rank6]: ^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank6]: local_plan = planner.create_local_plan() [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank15]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank15]: Traceback (most recent call last): (RANK 10) [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank15]: local_data = map_fun() [rank15]: ^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank15]: result = func(*args, **kwargs) [rank15]: ^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank15]: local_plan = planner.create_local_plan() [rank27]: result = func(*args, **kwargs) [rank27]: ^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank27]: local_plan = planner.create_local_plan() [rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank27]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank27]: raise CheckpointingException(_msg) [rank19]: local_plan = planner.create_local_plan() [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank19]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank19]: raise CheckpointingException(_msg) [rank19]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key 
embedding.position_embeddings.weight [rank19]: Traceback (most recent call last): (RANK 5) [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank6]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank15]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank15]: raise CheckpointingException(_msg) [rank15]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16n/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank9]: raise CheckpointingException(_msg) [rank9]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank27]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank27]: Traceback (most recent call last): (RANK 7) [rank19]: local_data = map_fun() [rank19]: ^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank19]: result = func(*args, **kwargs) [rank19]: ^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank19]: local_plan = planner.create_local_plan() [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py"384, 4096)) tensor for key embedding.position_embeddings.weight [rank16]: Traceback (most recent call last): (RANK 11) [rank6]: raise CheckpointingException(_msg) [rank6]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank6]: Traceback (most recent call last): (RANK ^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank7]: local_plan = planner.create_local_plan() [rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank9]: Traceback (most recent call last): (RANK 14) [rank9]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank9]: local_data = map_fun() [rank9]: ^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank9]: result = func(*args, **kwargs) [rank9]: ^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank9]: local_plan = planner.create_local_plan() [rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank16]: local_data = map_fun() [rank16]: ^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank16]: result = func(*args, **kwargs) [rank16]: ^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank16]: local_plan = planner.create_local_plan() [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank7]: raise CheckpointingException(_msg) [rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank7]: Traceback (most recent call last): (RANK 1) [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank7]: local_data = map_fun() [rank7]: ^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank7]: result = func(*args, **kwargs) [rank7]: ^^^^^^^^^^^^^^^^^^^^^ [rank9]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank9]: raise CheckpointingException(_msg) [rank9]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank9]: Traceback (most recent call last): (RANK 15) [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank9]: local_data = map_fun() [rank9]: ^^^^^^^^^ [rank9]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank16]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank16]: raise CheckpointingException(_msg) [rank16]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank16]: Traceback (most recent call last): (RANK 12) [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank16]: local_data = map_fun() [rank16]: ^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank7]: local_plan = planner.create_local_plan() [rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank7]: raise CheckpointingException(_msg) [rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank7]: Traceback (most recent call last): (RANK 2) [rank9]: result = func(*args, **kwargs) [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank27]: local_data = map_fun() [rank27]: ^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank27]: result = func(*args, **kwargs) [rank27]: ^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank27]: local_plan = planner.create_local_plan() [rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank16]: result = func(*args, **kwargs) [rank16]: ^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank7]: local_data = map_fun() [rank7]: ^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank7]: result = func(*args, **kwargs) [rank7]: 
^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank7]: local_plan = planner.create_local_plan() [rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank9]: ^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank9]: local_plan = planner.create_local_plan() [rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank9]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank9]: raise CheckpointingException(_msg) [rank9]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank27]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank27]: raise CheckpointingException(_msg) [rank27]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank27]: Traceback (most recent call last): (RANK 8) [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank27]: local_data = map_fun() [rank27]: ^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line, line 605, in create_local_plan [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank16]: local_plan = planner.create_local_plan() [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank16]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank16]: raise CheckpointingException(_msg) [rank16]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in 
_validate_global_shapes [rank9]: Traceback (most recent call last): (RANK 16) [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank9]: local_data = map_fun() [rank9]: ^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank9]: result = func(*args, **kwargs) [rank9]: ^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint384, 4096)) tensor for key embedding.position_embeddings.weight [rank15]: Traceback (most recent call last): (RANK 11) [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank25]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank25]: raise CheckpointingException(_msg) [rank25]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank25]: Traceback (most recent call last): (RANK 6) [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank25]: local_data = map_fun() [rank25]: ^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank16]: Traceback (most recent call last): (RANK 13) [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank16]: local_data = map_fun() [rank16]: ^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank16]: result = func(*args, **kwargs) [rank16]: ^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank16]: local_plan = planner.create_local_plan() [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dising.position_embeddings.weight [rank7]: raise CheckpointingException(_msg) [rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank7]: Traceback (most recent call last): (RANKtensors) [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank2]: raise CheckpointingException(_msg) [rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key 
embedding.position_embeddings.weight [rank2]: Traceback (most recent call last): (RANK 6) [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank2]: local_data = map_fun() [rank15]: local_data = map_fun() [rank15]: ^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank15]: result = func(*args, **kwargs) [rank15]: ^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank15]: local_plan = planner.create_local_plan() [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank15]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank25]: result = func(*args, **kwargs) [rank25]: ^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank25]: local_plan = planner.create_local_plan() [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank25]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank25]: raise CheckpointingException(_msg) [rank21]: Traceback (most recent call last): (RANK 3) [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank21]: local_data = map_fun() [rank21]: ^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank21]: result = func(*args, **kwargs) [rank21]: ^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank21]: local_plan = planner.create_local_plan() [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank15]: raise CheckpointingException(_msg) [rank15]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank15]: Traceback (most recent call last): (RANK 12) [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank15]: local_data = map_fun() [rank15]: 
^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank15]: result = func(*args, **kwargs) [rank15]: ^^^^^^^^^^^^^^^^^^^^^ [rank25]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank25]: Traceback (most recent call last): (RANK 7) [rank21]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank21]: raise CheckpointingException(_msg) [rank21]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank21]: Traceback (most recent call last): (RANK 4) [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank21]: local_data = map_fun() [rank21]: ^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank2]: ^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank2]: result = func(*args, **kwargs) [rank2]: ^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank2]: local_plan = planner.create_local_plan() [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank25]: local_data = map_fun() [rank25]: ^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank25]: result = func(*args, **kwargs) [rank25]: ^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank25]: local_plan = planner.create_local_plan() [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank21]: result = func(*args, **kwargs) [rank21]: 
^^^^^^^^^^^^^^^^^^^^^ [rank2]: raise CheckpointingException(_msg) [rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank2]: Traceback (most recent call last): (RANK 7) [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank2]: local_data = map_fun() [rank2]: ^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank2]: result = func(*args, **kwargs) [rank2]: ^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank15]: local_plan = planner.create_local_plan() [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank15]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank15]: raise CheckpointingException(_msg) [rank15]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank15]: Traceback (most recent call last): (RANK 13) [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank25]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank25]: raise CheckpointingException(_msg) [rank25]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank25]: Traceback (most recent call last): (RANK 8) [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank25]: local_data = map_fun() [rank25]: ^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank21]: local_plan = planner.create_local_plan() [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank21]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank21]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank21]: raise CheckpointingException(_msg) [rank21]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank2]: local_plan = planner.create_local_plan() [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank2]: raise CheckpointingException(_msg) [rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank2]: Traceback (most recent call last): (RANK 8) [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank15]: local_data = map_fun() [rank15]: ^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank15]: result = func(*args, **kwargs) [rank15]: ^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank15]: local_plan = planner.create_local_plan() [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dising.position_embeddings.weight [rank13]: Traceback (most recent call last): (RANK 3) [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank13]: local_data = map_fun() [rank30]: result = func(*args, **kwargs) [rank30]: ^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank30]: local_plan = planner.create_local_plan() [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank30]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank30]: raise CheckpointingException(_msg) [rank21]: Traceback (most recent call last): (RANK 5) [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank21]: local_data = map_fun() [rank21]: ^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", 
line 87, in wrapper [rank21]: result = func(*args, **kwargs) [rank21]: ^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank21]: local_plan = planner.create_local_plan() [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py"ing.position_embeddings.weight [rank2]: local_data = map_fun() [rank2]: ^^^^^^^^^ [rank13]: ^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank13]: result = func(*args, **kwargs) [rank13]: ^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank13]: local_plan = planner.create_local_plan() [rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank13]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank30]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank30]: Traceback (most recent call last): (RANK 9) [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank30]: local_data = map_fun() [rank30]: ^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank30]: result = func(*args, **kwargs) [rank30]: ^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank30]: local_plan = planner.create_local_plan() [rank22]: Traceback (most recent call last): (RANK 3) [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank22]: local_data = map_fun() [rank22]: ^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank22]: result = func(*args, **kwargs) [rank22]: ^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank22]: local_plan = planner.create_local_plan() [rank22]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank2]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank2]: result = func(*args, **kwargs) [rank2]: ^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/minicond 3) [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank0]: local_data = map_fun() [rank0]: ^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank0]: result = func(*args, **kwargs) [rank0]: ^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank13]: raise CheckpointingException(_msg) [rank13]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank13]: Traceback (most recent call last): (RANK 4) [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank13]: local_data = map_fun() [rank13]: ^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank13]: result = func(*args, **kwargs) [rank13]: ^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank22]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank22]: raise CheckpointingException(_msg) [rank22]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank22]: Traceback (most recent call last): (RANK 4) [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank22]: local_data = map_fun() [rank22]: ^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank0]: local_plan = planner.create_local_plan() [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank0]: 
raise CheckpointingException(_msg) [rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank0]: Traceback (most recent call last): (RANK 4) [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank13]: local_plan = planner.create_local_plan() [rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank13]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank13]: raise CheckpointingException(_msg) [rank13]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank13]: Traceback (most recent call last): (RANK 5) [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank30]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank30]: raise CheckpointingException(_msg) [rank30]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank30]: Traceback (most recent call last): (RANK 10) [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank30]: local_data = map_fun() [rank30]: ^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank22]: result = func(*args, **kwargs) [rank22]: ^^^^^^^^^^^^^^^^^^^^^ [rank0]: local_data = map_fun() [rank0]: ^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank0]: result = func(*args, **kwargs) [rank0]: ^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank0]: local_plan = planner.create_local_plan() [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank13]: local_data = map_fun() [rank13]: ^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank13]: result = 
func(*args, **kwargs) [rank13]: ^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank13]: local_plan = planner.create_local_plan() [rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py"t_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank15]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank30]: result = func(*args, **kwargs) [rank30]: ^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank30]: local_plan = planner.create_local_plan() [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank30]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank30]: raise CheckpointingException(_msg) [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank22]: local_plan = planner.create_local_plan() [rank22]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank22]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank22]: raise CheckpointingException(_msg) [rank22]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank0]: raise CheckpointingException(_msg) [rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank0]: Traceback (most recent call last): (RANK 5) [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank0]: local_data = map_fun() [rank0]: ^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank0]: result = func(*args, **kwargs) [rank0]: ^^^^^^^^^^^^^^^^^^^^^ [rank30]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16sult = func(*args, **kwargs) [rank28]: ^^^^^^^^^^^^^^^^^^^^^ [rank28]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank28]: local_plan = planner.create_local_plan() [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank28]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank28]: raise CheckpointingException(_msg) [rank22]: Traceback (most recent call last): (RANK 5) [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank22]: local_data = map_fun() [rank22]: ^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank22]: result = func(*args, **kwargs) [rank22]: ^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank22]: local_plan = planner.create_local_plan() [rank22]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank28]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank28]: Traceback (most recent call last): (RANK 1) [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank28]: local_data = map_fun() [rank28]: ^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank28]: result = func(*args, **kwargs) [rank28]: ^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank28]: local_plan = planner.create_local_plan() [rank23]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank23]: raise CheckpointingException(_msg) [rank23]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank23]: Traceback (most recent call last): (RANK 6) [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in 
reduce_scatter [rank23]: local_data = map_fun() [rank23]: ^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank0]: local_plan = planner.create_local_plan() [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_ 3) [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank1]: local_data = map_fun() [rank1]: ^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank1]: result = func(*args, **kwargs) [rank1]: ^^^^^^^^^^^^^^^^^^^^^ [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank23]: result = func(*args, **kwargs) [rank23]: ^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank23]: local_plan = planner.create_local_plan() [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank23]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank23]: raise CheckpointingException(_msg) [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank1]: local_plan = planner.create_local_plan() [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank1]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank1]: raise CheckpointingException(_msg) [rank1]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank1]: Traceback (most recent call last): (RANK 4) [rank28]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank28]: raise CheckpointingException(_msg) [rank28]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank28]: Traceback (most recent call 
last): (RANK 2) [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank28]: local_data = map_fun() [rank28]: ^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank23]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank1]: local_data = map_fun() [rank1]: ^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank1]: result = func(*args, **kwargs) [rank1]: ^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank1]: local_plan = planner.create_local_plan() [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank15]: raise CheckpointingException(_msg) [rank15]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank15]: Traceback (most recent call last): (RANK 14) [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank15]: local_data = map_fun() [rank15]: ^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank15]: result = func(*args, **kwargs) [rank15]: ^^^^^^^^^^^^^^^^^^^^^ [rank28]: result = func(*args, **kwargs) [rank28]: ^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank28]: local_plan = planner.create_local_plan() [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank28]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank28]: raise CheckpointingException(_msg) [rank23]: Traceback (most recent call last): (RANK 7) [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank23]: local_data = map_fun() [rank23]: ^^^^^^^^^ [rank23]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank23]: result = func(*args, **kwargs) [rank23]: ^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank23]: local_plan = planner.create_local_plan() [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank1]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank1]: raise CheckpointingException(_msg) [rank1]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank1]: Traceback (most recent call last): (RANK 5) [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank1]: local_data = map_fun() [rank1]: ^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank15]: local_plan = planner.create_local_plan() [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank15]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank15]: raise CheckpointingException(_msg) [rank15]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank28]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedd 87, in wrapper [rank25]: result = func(*args, **kwargs) [rank25]: ^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank25]: local_plan = planner.create_local_plan() [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank25]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank23]: self._validate_global_shapes(self.metadata, 
self.shapes_validation_sharded_tensors) [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank23]: raise CheckpointingException(_msg) [rank23]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank23]: Traceback (most recent call last): (RANK 8) [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank23]: local_data = map_fun() [rank23]: ^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", lineing.position_embeddings.weight [rank1]: result = func(*args, **kwargs) [rank1]: ^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank15]: Traceback (most recent call last): (RANK 15) [rank25]: raise CheckpointingException(_msg) [rank25]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank25]: Traceback (most recent call last): (RANK 9) [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank25]: local_data = map_fun() [rank25]: ^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank25]: result = func(*args, **kwargs) [rank25]: ^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank17]: Traceback (most recent call last): (RANK 3) [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank17]: local_data = map_fun() [rank17]: ^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank17]: result = func(*args, **kwargs) [rank17]: ^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank17]: local_plan = planner.create_local_plan() [rank17]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank1]: local_plan = planner.create_local_plan() [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank1]: self._validate_global_shapes(self.metadata, 
self.shapes_validation_sharded_a/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank2]: local_plan = planner.create_local_plan() [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank15]: local_data = map_fun() [rank15]: ^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank15]: result = func(*args, **kwargs) [rank15]: ^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank15]: local_plan = planner.create_local_plan() [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank25]: local_plan = planner.create_local_plan() [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank17]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank17]: raise CheckpointingException(_msg) [rank17]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank17]: Traceback (most recent call last): (RANK 4) [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank17]: local_data = map_fun() [rank17]: ^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank2]: raise CheckpointingException(_msg) [rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank2]: Traceback (most recent call last): (RANK 9) [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank2]: local_data = map_fun() [rank2]: ^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank2]: result = func(*args, **kwargs) [rank2]: ^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in 
local_step [rank15]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank15]: raise CheckpointingException(_msg) [rank15]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank15]: Traceback (most recent call last): (RANK 16) [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank15]: local_data = map_fun() [rank15]: ^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/ing.position_embeddings.weight [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank25]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank25]: raise CheckpointingException(_msg) [rank25]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank25]: Traceback (most recent call last): (RANK 10) [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank25]: local_data = map_fun() [rank25]: ^^^^^^^^^ [rank17]: result = func(*args, **kwargs) [rank2]: local_plan = planner.create_local_plan() [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank2]: raise CheckpointingException(_msg) [rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank2]: Traceback (most recent call last): (RANK 10) [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: Traceback (most recent call last): (RANK 3) [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: local_data = map_fun() [rank11]: ^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank11]: result = func(*args, **kwargs) [rank11]: ^^^^^^^^^^^^^^^^^^^^^ [rank11]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank11]: local_plan = planner.create_local_plan() [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank25]: result = func(*args, **kwargs) [rank25]: ^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank25]: local_plan = planner.create_local_plan() [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank25]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank25]: raise CheckpointingException(_msg) [rank17]: ^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank17]: local_plan = planner.create_local_plan() [rank17]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank17]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank17]: raise CheckpointingException(_msg) [rank17]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank2]: local_data = map_fun() [rank2]: ^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank2]: result = func(*args, **kwargs) [rank2]: ^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank2]: local_plan = planner.create_local_plan() [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank11]: raise CheckpointingException(_msg) [rank11]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded 
(torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank11]: Traceback (most recent call last): (RANK 4) [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: local_data = map_fun() [rank11]: ^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank25]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank29]: Traceback (most recent call last): (RANK 11) [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank29]: local_data = map_fun() [rank29]: ^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank29]: result = func(*args, **kwargs) [rank29]: ^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank29]: local_plan = planner.create_local_plan() [rank17]: Traceback (most recent call last): (RANK 5) [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank17]: local_data = map_fun() [rank17]: ^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank17]: result = func(*args, **kwargs) [rank17]: ^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank17]: local_plan = planner.create_local_plan() [rank17]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py"ing.position_embeddings.weight [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank11]: result = func(*args, **kwargs) [rank11]: ^^^^^^^^^^^^^^^^^^^^^ [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank29]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: raise CheckpointingException(_msg) [rank29]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank29]: Traceback (most recent call last): (RANK 12) [rank29]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank29]: local_data = map_fun() [rank18]: Traceback (most recent call last): (RANK 3) [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank18]: local_data = map_fun() [rank18]: ^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank18]: result = func(*args, **kwargs) [rank18]: ^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank18]: local_plan = planner.create_local_plan() [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank2]: raise CheckpointingException(_msg) [rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank2]: Traceback (most recent call last): (RANK 11) [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank4]: local_data = map_fun() [rank4]: ^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank4]: result = func(*args, **kwargs) [rank4]: ^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank11]: local_plan = planner.create_local_plan() [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank11]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank11]: raise CheckpointingException(_msg) [rank11]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank29]: ^^^^^^^^^ [rank18]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank18]: raise CheckpointingException(_msg) [rank18]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key 
embedding.position_embeddings.weight [rank18]: Traceback (most recent call last): (RANK 4) [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank18]: local_data = map_fun() [rank18]: ^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank4]: local_plan = planner.create_local_plan() [rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank4]: raise CheckpointingException(_msg) [rank4]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank4]: Traceback (most recent call last): (RANK 12) [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: Traceback (most recent call last): (RANK 5) [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: local_data = map_fun() [rank11]: ^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank11]: result = func(*args, **kwargs) [rank11]: ^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank11]: local_plan = planner.create_local_plan() [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank29]: result = func(*args, **kwargs) [rank29]: ^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank29]: local_plan = planner.create_local_plan() [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank29]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: raise CheckpointingException(_msg) [rank18]: result = func(*args, **kwargs) [rank4]: local_data = map_fun() [rank4]: ^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank4]: result = func(*args, **kwargs) [rank4]: 
^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank4]: local_plan = planner.create_local_plan() [rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py"distributed/checkpoint/logger.py", line 87, in wrapper [rank15]: result = func(*args, **kwargs) [rank15]: ^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank15]: local_plan = planner.create_local_plan() [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank15]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank29]: Traceback (most recent call last): (RANK 13) [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank29]: local_data = map_fun() [rank29]: ^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank29]: result = func(*args, **kwargs) [rank29]: ^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank29]: local_plan = planner.create_local_plan() [rank18]: ^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank18]: local_plan = planner.create_local_plan() [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank18]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank18]: raise CheckpointingException(_msg) [rank18]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in 
_validate_global_shapes [rank4]: raise CheckpointingException(_msg) [rank4]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank4]: Traceback (most recent call last): (RANK 13) [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank4]: local_data = map_fun() [rank4]: ^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank4]: result = func(*args, **kwargs) [rank4]: ^^^^^^^^^^^^^^^^^^^^^ [rank15]: raise CheckpointingException(_msg) [rank15]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank15]: Traceback (most recent call last): (RANK 17) [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank15]: local_data = map_fun() [rank15]: ^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank15]: result = func(*args, **kwargs) [rank15]: ^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dising.position_embeddings.weight [rank28]: Traceback (most recent call last): (RANK 3) [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank28]: local_data = map_fun() [rank28]: ^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank28]: result = func(*args, **kwargs) [rank28]: ^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank28]: local_plan = planner.create_local_plan() [rank18]: Traceback (most recent call last): (RANK 5) [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank18]: local_data = map_fun() [rank18]: ^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank18]: result = func(*args, **kwargs) [rank18]: ^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank18]: local_plan = planner.create_local_plan() [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py" 87, in wrapper [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank15]: local_plan = planner.create_local_plan() [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank28]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank28]: raise CheckpointingException(_msg) [rank28]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank28]: Traceback (most recent call last): (RANK 4) [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank28]: local_data = map_fun() [rank23]: result = func(*args, **kwargs) [rank23]: ^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank23]: local_plan = planner.create_local_plan() [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank23]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank23]: raise CheckpointingException(_msg) [rank4]: local_plan = planner.create_local_plan() [rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatrotensors) [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank0]: raise CheckpointingException(_msg) [rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank0]: Traceback (most recent call last): (RANK 6) [rank15]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank15]: raise CheckpointingException(_msg) [rank15]: 
megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank15]: Traceback (most recent call last): (RANK 18) [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank15]: local_data = map_fun() [rank15]: ^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank28]: ^^^^^^^^^ [rank23]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank23]: Traceback (most recent call last): (RANK 9) [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank23]: local_data = map_fun() [rank23]: ^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank23]: result = func(*args, **kwargs) [rank23]: ^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank23]: local_plan = planner.create_local_plan() [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank0]: local_data = map_fun() [rank0]: ^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank0]: result = func(*args, **kwargs) [rank0]: ^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank0]: local_plan = planner.create_local_plan() [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank15]: result = func(*args, **kwargs) [rank15]: ^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank15]: local_plan = planner.create_local_plan() [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank15]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank15]: raise CheckpointingException(_msg) [rank15]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torc, line 605, in create_local_plan [rank28]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank28]: result = func(*args, **kwargs) [rank28]: ^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank28]: local_plan = planner.create_local_plan() [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank28]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank28]: raise CheckpointingException(_msg) [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank0]: raise CheckpointingException(_msg) [rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank0]: Traceback (most recent call last): (RANK 7) [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank0]: local_data = map_fun() [rank0]: ^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank12]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank12]: raise CheckpointingException(_msg) [rank12]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank12]: Traceback (most recent call last): (RANK 6) [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank12]: local_data = map_fun() [rank12]: ^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank28]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank28]: Traceback (most recent call last): (RANK 5) [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank28]: local_data = map_fun() [rank28]: ^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", 
line 87, in wrapper [rank28]: result = func(*args, **kwargs) [rank28]: ^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank28]: local_plan = planner.create_local_plan() [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank23]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank23]: raise CheckpointingException(_msg) [rank23]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank23]: Traceback (most recent call last): (RANK 10) [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank23]: local_data = map_fun() [rank23]: ^^^^^^^^^ [rank0]: result = func(*args, **kwargs) [rank0]: ^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank0]: local_plan = planner.create_local_plan() [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank0]: raise CheckpointingException(_msg) [rank12]: result = func(*args, **kwargs) [rank12]: ^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank12]: local_plan = planner.create_local_plan() [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank12]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank12]: raise CheckpointingException(_msg) [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py"ing.position_embeddings.weight [rank24]: Traceback (most recent call last): (RANK 3) [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank24]: local_data = map_fun() [rank24]: ^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: result = func(*args, **kwargs) [rank24]: 
^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank24]: local_plan = planner.create_local_plan() [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank23]: result = func(*args, **kwargs) [rank23]: ^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank23]: local_plan = planner.create_local_plan() [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank23]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank23]: raise CheckpointingException(_msg) [rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank0]: Traceback (most recent call last): (RANK 8) [rank12]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank24]: raise CheckpointingException(_msg) [rank24]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank24]: Traceback (most recent call last): (RANK 4) [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank24]: local_data = map_fun() [rank23]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16t_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank16]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank16]: raise CheckpointingException(_msg) [rank16]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank16]: Traceback (most recent call last): (RANK 14) [rank16]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank16]: local_data = map_fun() [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank0]: local_data = map_fun() [rank0]: ^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank0]: result = func(*args, **kwargs) [rank0]: ^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/minicondtensors) [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank1]: raise CheckpointingException(_msg) [rank1]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank12]: Traceback (most recent call last): (RANK 7) [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank12]: local_data = map_fun() [rank12]: ^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank12]: result = func(*args, **kwargs) [rank12]: ^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank12]: local_plan = planner.create_local_plan() [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: ^^^^^^^^^ [rank16]: ^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank16]: result = func(*args, **kwargs) [rank16]: ^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank16]: local_plan = planner.create_local_plan() [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank16]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank1]: Traceback (most recent call last): (RANK 6) [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank1]: local_data = map_fun() [rank1]: ^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank1]: result = func(*args, **kwargs) [rank1]: 
^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank1]: local_plan = planner.create_local_plan() [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank12]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank12]: raise CheckpointingException(_msg) [rank12]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank12]: Traceback (most recent call last): (RANK 8) [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank12]: local_data = map_fun() [rank12]: ^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line, line 605, in create_local_plan [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: result = func(*args, **kwargs) [rank24]: ^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank24]: local_plan = planner.create_local_plan() [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank24]: raise CheckpointingException(_msg) [rank16]: raise CheckpointingException(_msg) [rank1]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank1]: raise CheckpointingException(_msg) [rank1]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank1]: Traceback (most recent call last): (RANK 7) [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank1]: local_data = map_fun() [rank1]: ^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank13]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank13]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank13]: raise CheckpointingException(_msg) [rank13]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank13]: Traceback (most recent call last): (RANK 6) [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank13]: local_data = map_fun() [rank13]: ^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank24]: Traceback (most recent call last): (RANK 5) [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank24]: local_data = map_fun() [rank24]: ^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: result = func(*args, **kwargs) [rank24]: ^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank24]: local_plan = planner.create_local_plan() [rank16]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank16]: Traceback (most recent call last): (RANK 15) [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank16]: local_data = map_fun() [rank16]: ^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank16]: result = func(*args, **kwargs) [rank16]: ^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank16]: local_plan = planner.create_local_plan() [rank1]: result = func(*args, **kwargs) [rank1]: ^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank1]: local_plan = planner.create_local_plan() [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank1]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes 
[rank1]: raise CheckpointingException(_msg) [rank13]: result = func(*args, **kwargs) [rank13]: ^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank13]: local_plan = planner.create_local_plan() [rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank13]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank13]: raise CheckpointingException(_msg) [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank28]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank28]: raise CheckpointingException(_msg) [rank28]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank28]: Traceback (most recent call last): (RANK 6) [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank28]: local_data = map_fun() [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank16]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank16]: raise CheckpointingException(_msg) [rank16]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank16]: Traceback (most recent call last): (RANK 16) [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank16]: local_data = map_fun() [rank1]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank1]: Traceback (most recent call last): (RANK 8) [rank13]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank28]: ^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank28]: result = func(*args, **kwargs) [rank28]: ^^^^^^^^^^^^^^^^^^^^^ [rank28]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank28]: local_plan = planner.create_local_plan() [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank28]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank16]: ^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/, line 605, in create_local_plan [rank21]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank21]: raise CheckpointingException(_msg) [rank21]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank21]: Traceback (most recent call last): (RANK 6) [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank21]: local_data = map_fun() [rank21]: ^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank1]: local_data = map_fun() [rank1]: ^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank1]: result = func(*args, **kwargs) [rank1]: ^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/minicondtensors) [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank5]: raise CheckpointingException(_msg) [rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank13]: Traceback (most recent call last): (RANK 7) [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank13]: local_data = map_fun() [rank13]: ^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank13]: result = func(*args, **kwargs) [rank13]: ^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank13]: local_plan = planner.create_local_plan() [rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank28]: raise 
CheckpointingException(_msg) [rank28]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank28]: Traceback (most recent call last): (RANK 7) [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank28]: local_data = map_fun() [rank28]: ^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank28]: result = func(*args, **kwargs) [rank28]: ^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank21]: result = func(*args, **kwargs) [rank21]: ^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank21]: local_plan = planner.create_local_plan() [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank21]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank21]: raise CheckpointingException(_msg) [rank5]: Traceback (most recent call last): (RANK 6) [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank5]: local_data = map_fun() [rank5]: ^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank5]: result = func(*args, **kwargs) [rank5]: ^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank5]: local_plan = planner.create_local_plan() [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank13]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank13]: raise CheckpointingException(_msg) [rank13]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank13]: Traceback (most recent call last): (RANK 8) [rank13]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank13]: local_data = map_fun() [rank13]: ^^^^^^^^^ [rank28]: local_plan = planner.create_local_plan() [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank28]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank28]: raise CheckpointingException(_msg) [rank28]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank28]: Traceback (most recent call last): (RANK 8) [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank21]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank21]: Traceback (most recent call last): (RANK 7) [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank21]: local_data = map_fun() [rank21]: ^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank21]: result = func(*args, **kwargs) [rank21]: ^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank21]: local_plan = planner.create_local_plan() [rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank5]: raise CheckpointingException(_msg) [rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank5]: Traceback (most recent call last): (RANK 7) [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank5]: local_data = map_fun() [rank5]: ^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line/state_dict_loader.py", line 223, in local_step [rank9]: local_plan = planner.create_local_plan() [rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in 
create_local_plan [rank9]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank9]: raise CheckpointingException(_msg) [rank9]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank28]: local_data = map_fun() [rank28]: ^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line384, 4096)) tensor for key embedding.position_embeddings.weight [rank25]: Traceback (most recent call last): (RANK 11) [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank25]: local_data = map_fun() [rank25]: ^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank25]: result = func(*args, **kwargs) [rank25]: ^^^^^^^^^^^^^^^^^^^^^ [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank21]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank21]: raise CheckpointingException(_msg) [rank21]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank21]: Traceback (most recent call last): (RANK 8) [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank21]: local_data = map_fun() [rank5]: result = func(*args, **kwargs) [rank5]: ^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank5]: local_plan = planner.create_local_plan() [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank5]: raise CheckpointingException(_msg) [rank9]: Traceback (most recent call last): (RANK 17) [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank9]: local_data = map_fun() [rank9]: ^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank9]: result = func(*args, 
**kwargs) [rank9]: ^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank9]: local_plan = planner.create_local_plan() [rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank25]: local_plan = planner.create_local_plan() [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank25]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank25]: raise CheckpointingException(_msg) [rank25]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank21]: ^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line, line 605, in create_local_plan [rank22]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank22]: raise CheckpointingException(_msg) [rank22]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank22]: Traceback (most recent call last): (RANK 6) [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank22]: local_data = map_fun() [rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank5]: Traceback (most recent call last): (RANK 8) [rank9]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank25]: Traceback (most recent call last): (RANK 12) [rank22]: ^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank22]: result = func(*args, **kwargs) [rank22]: ^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank22]: local_plan = planner.create_local_plan() [rank22]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank22]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank22]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank5]: local_data = map_fun() [rank5]: ^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank5]: result = func(*args, **kwargs) [rank5]: ^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/minicond 3) [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank6]: local_data = map_fun() [rank6]: ^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank9]: raise CheckpointingException(_msg) [rank9]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank9]: Traceback (most recent call last): (RANK 18) [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank9]: local_data = map_fun() [rank9]: ^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank9]: result = func(*args, **kwargs) [rank9]: ^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank25]: local_data = map_fun() [rank25]: ^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank25]: result = func(*args, **kwargs) [rank25]: ^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank25]: local_plan = planner.create_local_plan() [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank22]: raise CheckpointingException(_msg) [rank6]: result = func(*args, **kwargs) [rank6]: ^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank6]: local_plan = planner.create_local_plan() [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ 
[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank6]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank6]: raise CheckpointingException(_msg) [rank9]: local_plan = planner.create_local_plan() [rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank9]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank9]: raise CheckpointingException(_msg) [rank9]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank9]: Traceback (most recent call last): (RANK 19) [rank25]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank25]: raise CheckpointingException(_msg) [rank25]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank25]: Traceback (most recent call last): (RANK 13) [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank25]: local_data = map_fun() [rank25]: ^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank22]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank22]: Traceback (most recent call last): (RANK 7) [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank22]: local_data = map_fun() [rank22]: ^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank22]: result = func(*args, **kwargs) [rank22]: ^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank22]: local_plan = planner.create_local_plan() [rank6]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank6]: Traceback (most recent call last): (RANK 4) [rank6]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank6]: local_data = map_fun() [rank6]: ^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank6]: result = func(*args, **kwargs) [rank6]: ^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank6]: local_plan = planner.create_local_plan() [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in , line 605, in create_local_plan [rank11]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank11]: raise CheckpointingException(_msg) [rank11]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank11]: Traceback (most recent call last): (RANK 6) [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: local_data = map_fun() [rank11]: ^^^^^^^^^ [rank25]: result = func(*args, **kwargs) [rank25]: ^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank25]: local_plan = planner.create_local_plan() [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dis, line 605, in create_local_plan [rank24]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank24]: raise CheckpointingException(_msg) [rank22]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank22]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank22]: raise CheckpointingException(_msg) [rank22]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank22]: Traceback (most recent call last): (RANK 8) [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank22]: local_data = map_fun() [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank6]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank6]: raise CheckpointingException(_msg) [rank6]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank6]: Traceback (most recent call last): (RANK 5) [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank6]: local_data = map_fun() [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank11]: result = func(*args, **kwargs) [rank11]: ^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank11]: local_plan = planner.create_local_plan() [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank11]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank11]: raise CheckpointingException(_msg) [rank24]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank24]: Traceback (most recent call last): (RANK 6) [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank24]: local_data = map_fun() [rank24]: ^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: result = func(*args, **kwargs) [rank24]: ^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank24]: local_plan = planner.create_local_plan() [rank22]: ^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line384, 4096)) tensor for key embedding.position_embeddings.weight [rank23]: Traceback (most recent call last): (RANK 11) [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank23]: local_data = map_fun() [rank23]: ^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank23]: result = 
func(*args, **kwargs) [rank23]: ^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank6]: ^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank6]: result = func(*args, **kwargs) [rank11]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank11]: Traceback (most recent call last): (RANK 7) [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: local_data = map_fun() [rank11]: ^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank11]: result = func(*args, **kwargs) [rank11]: ^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank11]: local_plan = planner.create_local_plan() [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank23]: local_plan = planner.create_local_plan() [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank23]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank23]: raise CheckpointingException(_msg) [rank23]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank23]: Traceback (most recent call last): (RANK 12) [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank6]: ^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank6]: local_plan = planner.create_local_plan() [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank6]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_ 3) [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank7]: local_data = map_fun() [rank7]: ^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper 
[rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank11]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank11]: raise CheckpointingException(_msg) [rank11]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank11]: Traceback (most recent call last): (RANK 8) [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: local_data = map_fun() [rank24]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank24]: raise CheckpointingException(_msg) [rank24]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank24]: Traceback (most recent call last): (RANK 7) [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank24]: local_data = map_fun() [rank24]: ^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank23]: local_data = map_fun() [rank7]: result = func(*args, **kwargs) [rank7]: ^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank7]: local_plan = planner.create_local_plan() [rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank7]: raise CheckpointingException(_msg) [rank11]: ^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", lineh.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank15]: Traceback (most recent call last): (RANK 19) [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank15]: local_data = map_fun() [rank15]: ^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank15]: result = func(*args, **kwargs) [rank15]: ^^^^^^^^^^^^^^^^^^^^^ 
[rank24]: result = func(*args, **kwargs) [rank24]: ^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank24]: local_plan = planner.create_local_plan() [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank24]: raise CheckpointingException(_msg) [rank23]: ^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank23]: result = func(*args, **kwargs) [rank23]: ^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank23]: local_plan = planner.create_local_plan() [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank23]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank7]: Traceback (most recent call last): (RANK 4) [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank7]: local_data = map_fun() [rank7]: ^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank7]: result = func(*args, **kwargs) [rank7]: ^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank7]: local_plan = planner.create_local_plan() [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank15]: local_plan = planner.create_local_plan() [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank15]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank15]: raise CheckpointingException(_msg) [rank15]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 
4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank24]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank24]: Traceback (most recent call last): (RANK 8) [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank24]: local_data = map_fun() [rank24]: ^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank28]: result = func(*args, **kwargs) [rank28]: ^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank28]: local_plan = planner.create_local_plan() [rank23]: raise CheckpointingException(_msg) [rank23]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank23]: Traceback (most recent call last): (RANK 13) [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank23]: local_data = map_fun() [rank23]: ^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank23]: result = func(*args, **kwargs) [rank23]: ^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank7]: raise CheckpointingException(_msg) [rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank7]: Traceback (most recent call last): (RANK 5) [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank7]: local_data = map_fun() [rank15]: Traceback (most recent call last): (RANK 20) [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank28]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank28]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank28]: raise CheckpointingException(_msg) [rank28]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank28]: Traceback (most recent call last): (RANK 9) [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank28]: local_data = map_fun() [rank23]: local_plan = planner.create_local_plan() [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dis, line 605, in create_local_plan [rank17]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank17]: raise CheckpointingException(_msg) [rank17]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank17]: Traceback (most recent call last): (RANK 6) [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank17]: local_data = map_fun() [rank7]: ^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank7]: result = func(*args, **kwargs) [rank15]: local_data = map_fun() [rank15]: ^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank15]: result = func(*args, **kwargs) [rank15]: ^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank15]: local_plan = planner.create_local_plan() [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank15]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank28]: ^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank17]: ^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank17]: result = func(*args, **kwargs) [rank17]: ^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank17]: local_plan = planner.create_local_plan() [rank17]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in 
create_local_plan [rank17]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank7]: ^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank7]: local_plan = planner.create_local_plan() [rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_b/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank2]: local_data = map_fun() [rank2]: ^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank2]: result = func(*args, **kwargs) [rank2]: ^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank15]: raise CheckpointingException(_msg) [rank15]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank15]: Traceback (most recent call last): (RANK 21) [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank15]: local_data = map_fun() [rank15]: ^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank15]: result = func(*args, **kwargs) [rank15]: ^^^^^^^^^^^^^^^^^^^^^ [rank28]: result = func(*args, **kwargs) [rank28]: ^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank28]: local_plan = planner.create_local_plan() [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank28]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank28]: raise CheckpointingException(_msg) [rank17]: raise CheckpointingException(_msg) [rank17]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank17]: Traceback (most recent call last): (RANK 7) [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank17]: local_data = map_fun() [rank17]: ^^^^^^^^^ [rank17]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank17]: result = func(*args, **kwargs) [rank17]: ^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank2]: local_plan = planner.create_local_plan() [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank2]: raise CheckpointingException(_msg) [rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank2]: Traceback (most recent call last): (RANK 12) [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank15]: local_plan = planner.create_local_plan() [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junreduce_scatter [rank9]: local_data = map_fun() [rank9]: ^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank9]: result = func(*args, **kwargs) [rank9]: ^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank9]: local_plan = planner.create_local_plan() [rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank28]: Traceback (most recent call last): (RANK 10) [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank28]: local_data = map_fun() [rank28]: ^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank28]: result = func(*args, **kwargs) [rank28]: ^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank28]: local_plan = planner.create_local_plan() [rank17]: local_plan = planner.create_local_plan() [rank17]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank17]: 
self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank17]: raise CheckpointingException(_msg) [rank17]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank17]: Traceback (most recent call last): (RANK 8) [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank2]: local_data = map_fun() [rank2]: ^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank2]: result = func(*args, **kwargs) [rank2]: ^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank2]: local_plan = planner.create_local_plan() [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank9]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank9]: raise CheckpointingException(_msg) [rank9]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank9]: Traceback (most recent call last): (RANK 20) [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank9]: local_data = map_fun() [rank9]: ^^^^^^^^^ [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank28]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank28]: raise CheckpointingException(_msg) [rank28]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16t_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank25]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank17]: local_data = map_fun() 
[rank17]: ^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line, line 605, in create_local_plan [rank18]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank18]: raise CheckpointingException(_msg) [rank18]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank18]: Traceback (most recent call last): (RANK 6) [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank2]: raise CheckpointingException(_msg) [rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank2]: Traceback (most recent call last): (RANK 13) [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank2]: local_data = map_fun() [rank2]: ^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank9]: result = func(*args, **kwargs) [rank9]: ^^^^^^^^^^^^^^^^^^^^^ [rank25]: raise CheckpointingException(_msg) [rank25]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank25]: Traceback (most recent call last): (RANK 14) [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank25]: local_data = map_fun() [rank25]: ^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank25]: result = func(*args, **kwargs) [rank25]: ^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank18]: local_data = map_fun() [rank18]: ^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank18]: result = func(*args, **kwargs) [rank18]: ^^^^^^^^^^^^^^^^^^^^^ [rank18]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank18]: local_plan = planner.create_local_plan() [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank18]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank2]: result = func(*args, **kwargs) [rank2]: ^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank9]: local_plan = planner.create_local_plan() [rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank9]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank9]: raise CheckpointingException(_msg) [rank9]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank9]: Traceback (most recent call last): (RANK 21) [rank25]: local_plan = planner.create_local_plan() [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank2]: local_plan = planner.create_local_plan() [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank4]: raise CheckpointingException(_msg) [rank4]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank4]: Traceback (most recent call last): (RANK 14) [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank9]: local_data = map_fun() [rank9]: ^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank9]: result = func(*args, **kwargs) [rank9]: ^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank9]: local_plan = planner.create_local_plan() [rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank9]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank25]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank25]: raise CheckpointingException(_msg) [rank25]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank25]: Traceback (most recent call last): (RANK 15) [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank25]: local_data = map_fun() [rank25]: ^^^^^^^^^ [rank18]: raise CheckpointingException(_msg) [rank18]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank18]: Traceback (most recent call last): (RANK 7) [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank18]: local_data = map_fun() [rank18]: ^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank18]: result = func(*args, **kwargs) [rank18]: ^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank4]: local_data = map_fun() [rank4]: ^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank4]: result = func(*args, **kwargs) [rank4]: ^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank4]: local_plan = planner.create_local_plan() [rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank9]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_sh 87, in wrapper [rank11]: result = func(*args, **kwargs) [rank11]: ^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank11]: local_plan = planner.create_local_plan() [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ 
[rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank11]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank25]: result = func(*args, **kwargs) [rank25]: ^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank25]: local_plan = planner.create_local_plan() [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank25]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank25]: raise CheckpointingException(_msg) [rank18]: local_plan = planner.create_local_plan() [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank18]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank18]: raise CheckpointingException(_msg) [rank18]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank18]: Traceback (most recent call last): (RANK 8) [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank4]: raise CheckpointingException(_msg) [rank4]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank4]: Traceback (most recent call last): (RANK 15) [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank4]: local_data = map_fun() [rank4]: ^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank11]: raise CheckpointingException(_msg) [rank11]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key 
embedding.position_embeddings.weight [rank11]: Traceback (most recent call last): (RANK 9) [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: local_data = map_fun() [rank11]: ^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank11]: result = func(*args, **kwargs) [rank11]: ^^^^^^^^^^^^^^^^^^^^^ [rank25]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank25]: Traceback (most recent call last): (RANK 16) [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank25]: local_data = map_fun() [rank25]: ^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/ 87, in wrapper [rank24]: result = func(*args, **kwargs) [rank24]: ^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank24]: local_plan = planner.create_local_plan() [rank18]: local_data = map_fun() [rank18]: ^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line, line 605, in create_local_plan [rank19]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank19]: raise CheckpointingException(_msg) [rank19]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank19]: Traceback (most recent call last): (RANK 6) [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank4]: result = func(*args, **kwargs) [rank4]: ^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank4]: local_plan = planner.create_local_plan() [rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank4]: raise CheckpointingException(_msg) [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank24]: raise CheckpointingException(_msg) [rank24]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank24]: Traceback (most recent call last): (RANK 9) [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank24]: local_data = map_fun() [rank19]: local_data = map_fun() [rank19]: ^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank19]: result = func(*args, **kwargs) [rank19]: ^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank19]: local_plan = planner.create_local_plan() [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank19]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank4]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank4]: Traceback (most recent call last): (RANK 16) [rank11]: local_plan = planner.create_local_plan() [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank11]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank11]: raise CheckpointingException(_msg) [rank11]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank11]: Traceback (most recent call last): (RANK 10) [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank24]: ^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank4]: local_data = map_fun() [rank4]: ^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper 
[rank4]: result = func(*args, **kwargs) [rank4]: ^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpointa/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank0]: local_plan = planner.create_local_plan() [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: local_data = map_fun() [rank11]: ^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank11]: result = func(*args, **kwargs) [rank11]: ^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank11]: local_plan = planner.create_local_plan() [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank11]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: result = func(*args, **kwargs) [rank24]: ^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank24]: local_plan = planner.create_local_plan() [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank24]: raise CheckpointingException(_msg) [rank19]: raise CheckpointingException(_msg) [rank19]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank19]: Traceback (most recent call last): (RANK 7) [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank19]: local_data = map_fun() [rank19]: ^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank19]: result = func(*args, **kwargs) [rank19]: ^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in 
_validate_global_shapes [rank0]: raise CheckpointingException(_msg) [rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank0]: Traceback (most recent call last): (RANK 9) [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank0]: local_data = map_fun() [rank0]: ^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank11]: raise CheckpointingException(_msg) [rank11]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16da/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank15]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank15]: raise CheckpointingException(_msg) [rank15]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank24]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank24]: Traceback (most recent call last): (RANK 10) [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank24]: local_data = map_fun() [rank24]: ^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: result = func(*args, **kwargs) [rank24]: ^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank24]: local_plan = planner.create_local_plan() [rank19]: local_plan = planner.create_local_plan() [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank19]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank19]: raise CheckpointingException(_msg) [rank19]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank19]: Traceback (most recent call last): (RANK 8) [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank0]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank0]: result = func(*args, **kwargs) [rank0]: ^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank0]: local_plan = planner.create_local_plan() [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank0]: raise CheckpointingException(_msg) [rank15]: Traceback (most recent call last): (RANK 22) [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank15]: local_data = map_fun() [rank15]: ^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank15]: result = func(*args, **kwargs) [rank15]: ^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank15]: local_plan = planner.create_local_plan() [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank24]: raise CheckpointingException(_msg) [rank24]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank28]: Traceback (most recent call last): (RANK 11) [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank28]: local_data = map_fun() [rank19]: local_data = map_fun() [rank19]: ^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", linedistributed/checkpoint/logger.py", line 87, in wrapper [rank16]: result = func(*args, **kwargs) [rank16]: ^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank16]: local_plan = planner.create_local_plan() [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank16]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank0]: Traceback (most recent call last): (RANK 10) [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank0]: local_data = map_fun() [rank0]: ^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank0]: result = func(*args, **kwargs) [rank0]: ^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank0]: local_plan = planner.create_local_plan() [rank15]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank28]: ^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank28]: result = func(*args, **kwargs) [rank28]: ^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank28]: local_plan = planner.create_local_plan() [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank28]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank16]: raise CheckpointingException(_msg) [rank16]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank16]: Traceback (most recent call last): (RANK 17) [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank16]: local_data = map_fun() [rank16]: ^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank16]: result = func(*args, **kwargs) [rank16]: ^^^^^^^^^^^^^^^^^^^^^ [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank15]: raise 
CheckpointingException(_msg) [rank15]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank15]: Traceback (most recent call last): (RANK 23) [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank15]: local_data = map_fun() [rank15]: ^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank15]: result = func(*args, **kwargs) [rank15]: ^^^^^^^^^^^^^^^^^^^^^ [rank28]: raise CheckpointingException(_msg) [rank28]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank28]: Traceback (most recent call last): (RANK 12) [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank28]: local_data = map_fun() [rank28]: ^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank28]: result = func(*args, **kwargs) [rank28]: ^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank16]: local_plan = planner.create_local_plan() [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank16]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank16]: raise CheckpointingException(_msg) [rank16]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank0]: raise CheckpointingException(_msg) [rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank0]: Traceback (most recent call last): (RANK 11) [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lia/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank1]: local_plan = planner.create_local_plan() [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank15]: local_plan = planner.create_local_plan() [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank15]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank15]: raise CheckpointingException(_msg) [rank15]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank28]: local_plan = planner.create_local_plan() [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank28]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank28]: raise CheckpointingException(_msg) [rank28]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank28]: Traceback (most recent call last): (RANK 13) [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank16]: Traceback (most recent call last): (RANK 18) [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank16]: local_data = map_fun() [rank16]: ^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank16]: result = func(*args, **kwargs) [rank16]: ^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank16]: local_plan = planner.create_local_plan() [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank1]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank1]: raise CheckpointingException(_msg) [rank1]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank1]: Traceback (most recent 
call last): (RANK 9) [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank1]: local_data = map_fun() [rank1]: ^^^^^^^^^ [rank15]: Traceback (most recent call last): (RANK 24) [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank15]: local_data = map_fun() [rank15]: ^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnseapes [rank9]: raise CheckpointingException(_msg) [rank9]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank9]: Traceback (most recent call last): (RANK 22) [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank9]: local_data = map_fun() [rank9]: ^^^^^^^^^ [rank28]: local_data = map_fun() [rank28]: ^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank28]: result = func(*args, **kwargs) [rank28]: ^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank28]: local_plan = planner.create_local_plan() [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/disdistributed/checkpoint/logger.py", line 87, in wrapper [rank25]: result = func(*args, **kwargs) [rank25]: ^^^^^^^^^^^^^^^^^^^^^ [rank16]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank16]: raise CheckpointingException(_msg) [rank16]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torc 87, in wrapper [rank18]: result = func(*args, **kwargs) [rank18]: ^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank18]: local_plan = planner.create_local_plan() [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank1]: result = func(*args, **kwargs) [rank1]: ^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank1]: local_plan = planner.create_local_plan() [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank1]: 
self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank1]: raise CheckpointingException(_msg) [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank9]: result = func(*args, **kwargs) [rank9]: ^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank9]: local_plan = planner.create_local_plan() [rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank9]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank9]: raise CheckpointingException(_msg) [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank25]: local_plan = planner.create_local_plan() [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank25]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank25]: raise CheckpointingException(_msg) [rank25]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank18]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank18]: raise CheckpointingException(_msg) [rank18]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank18]: Traceback (most recent call last): (RANK 9) [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank18]: local_data = map_fun() [rank18]: ^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank1]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank1]: Traceback (most recent call last): (RANK 10) [rank1]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank1]: local_data = map_fun() [rank1]: ^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank1]: result = func(*args, **kwargs) [rank1]: ^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank1]: local_plan = planner.create_local_plan() [rank9]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank9]: Traceback (most recent call last): (RANK 23) [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank9]: local_data = map_fun() [rank9]: ^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank9]: result = func(*args, **kwargs) [rank9]: ^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank9]: local_plan = planner.create_local_plan() [rank25]: Traceback (most recent call last): (RANK 17) [rank18]: result = func(*args, **kwargs) [rank18]: ^^^^^^^^^^^^^^^^^^^^^ [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank9]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank9]: raise CheckpointingException(_msg) [rank9]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank9]: Traceback (most recent call last): (RANK 24) [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank9]: local_data = map_fun() [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank25]: local_data = map_fun() [rank25]: ^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank25]: result = func(*args, **kwargs) [rank25]: ^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", 
line 223, in local_step [rank25]: local_plan = planner.create_local_plan() [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank18]: local_plan = planner.create_local_plan() [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank18]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank18]: raise CheckpointingException(_msg) [rank18]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank1]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank1]: raise CheckpointingException(_msg) [rank1]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank1]: Traceback (most recent call last): (RANK 11) [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lia/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank5]: local_plan = planner.create_local_plan() [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank9]: ^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank9]: result = func(*args, **kwargs) [rank9]: ^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank9]: local_plan = planne384, 4096)) tensor for key embedding.position_embeddings.weight [rank11]: Traceback (most recent call last): (RANK 11) [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: local_data = map_fun() [rank11]: ^^^^^^^^^ [rank25]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank25]: raise CheckpointingException(_msg) [rank25]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank25]: Traceback (most recent call last): (RANK 18) [rank25]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank25]: local_data = map_fun() [rank25]: ^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank18]: Traceback (most recent call last): (RANK 10) [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank18]: local_data = map_fun() [rank18]: ^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank18]: result = func(*args, **kwargs) [rank18]: ^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank18]: local_plan = planner.create_local_plan() [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank5]: raise CheckpointingException(_msg) [rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank5]: Traceback (most recent call last): (RANK 9) [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank5]: local_data = map_fun() [rank5]: ^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank11]: result = func(*args, **kwargs) [rank11]: ^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank11]: local_plan = planner.create_local_plan() [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank11]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank11]: raise CheckpointingException(_msg) [rank25]: result = func(*args, **kwargs) [rank25]: ^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank25]: local_plan = planner.create_local_plan() 
[rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank25]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank25]: raise CheckpointingException(_msg) [rank25]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torc384, 4096)) tensor for key embedding.position_embeddings.weight [rank18]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank18]: raise CheckpointingException(_msg) [rank18]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16 87, in wrapper [rank22]: result = func(*args, **kwargs) [rank22]: ^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank22]: local_plan = planner.create_local_plan() [rank22]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank5]: result = func(*args, **kwargs) [rank5]: ^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank5]: local_plan = planner.create_local_plan() [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank5]: raise CheckpointingException(_msg) [rank11]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank11]: Traceback (most recent call last): (RANK 12) [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: local_data = map_fun() [rank11]: ^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank11]: result = func(*args, **kwargs) [rank11]: ^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank11]: local_plan = 
planner.create_local_plan() [rank22]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank22]: raise CheckpointingException(_msg) [rank22]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank22]: Traceback (most recent call last): (RANK 9) [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank22]: local_data = map_fun() [rank22]: ^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank5]: Traceback (most recent call last): (RANK 10) [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank5]: local_data = map_fun() [rank5]: ^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank5]: result = func(*args, **kwargs) [rank5]: ^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank5]: local_plan = planner.create_local_plan() [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank11]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank11]: raise CheckpointingException(_msg) [rank11]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank11]: Traceback (most recent call last): (RANK 13) [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: local_data = map_fun() [rank22]: result = func(*args, **kwargs) [rank22]: ^^^^^^^^^^^^^^^^^^^^^ [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank11]: ^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank11]: result = func(*args, **kwargs) [rank11]: ^^^^^^^^^^^^^^^^^^^^^ [rank11]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank11]: local_plan = planner.create_local_plan() [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/disrver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank15]: result = func(*args, **kwargs) [rank15]: ^^^^^^^^^^^^^^^^^^^^^ [rank24]: Traceback (most recent call last): (RANK 11) [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank24]: local_data = map_fun() [rank24]: ^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: result = func(*args, **kwargs) [rank24]: ^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank24]: local_plan = planner.create_local_plan() [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank22]: local_plan = planner.create_local_plan() [rank22]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank22]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank22]: raise CheckpointingException(_msg) [rank22]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank5]: raise CheckpointingException(_msg) [rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank5]: Traceback (most recent call last): (RANK 11) [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/litensors) [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank6]: raise CheckpointingException(_msg) [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank15]: local_plan = planner.create_local_plan() [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank15]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank15]: raise CheckpointingException(_msg) [rank15]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank24]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank24]: raise CheckpointingException(_msg) [rank24]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank24]: Traceback (most recent call last): (RANK 12) [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank24]: local_data = map_fun() [rank24]: ^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank22]: Traceback (most recent call last): (RANK 10) [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank22]: local_data = map_fun() [rank22]: ^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank22]: result = func(*args, **kwargs) [rank22]: ^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank22]: local_plan = planner.create_local_plan() [rank22]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank6]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank6]: Traceback (most recent call last): (RANK 6) [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank6]: local_data = map_fun() [rank6]: ^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank6]: result = func(*args, **kwargs) [rank6]: ^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank6]: local_plan = planner.create_local_plan() [rank15]: Traceback 
(most recent call last): (RANK 25) [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank24]: result = func(*args, **kwargs) [rank24]: ^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank24]: local_plan = planner.create_local_plan() [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank24]: raise CheckpointingException(_msg) [rank22]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank22]: raise CheckpointingException(_msg) [rank22]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16t_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank23]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank23]: raise CheckpointingException(_msg) [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank6]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank6]: raise CheckpointingException(_msg) [rank6]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank6]: Traceback (most recent call last): (RANK 7) [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank6]: local_data = map_fun() [rank15]: local_data = map_fun() [rank15]: ^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank15]: result = func(*args, **kwargs) [rank15]: ^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank15]: local_plan = planner.create_local_plan() [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank15]: self._validate_global_shapes(self.metadata, 
self.shapes_validation_sharded_tensors) [rank24]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank24]: Traceback (most recent call last): (RANK 13) [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank24]: local_data = map_fun() [rank24]: ^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: result = func(*args, **kwargs) [rank24]: ^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank24]: local_plan = planner.create_local_plan() [rank23]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank23]: Traceback (most recent call last): (RANK 14) [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank23]: local_data = map_fun() [rank23]: ^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank23]: result = func(*args, **kwargs) [rank23]: ^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank23]: local_plan = planner.create_local_plan() [rank6]: ^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank6]: result = func(*args, **kwargs) [rank6]: ^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank6]: local_plan = planner.create_local_plan() [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank6]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank15]: raise CheckpointingException(_msg) [rank15]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank15]: Traceback (most recent call last): (RANK 26) [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in 
reduce_scatter [rank15]: local_data = map_fun() [rank15]: ^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank15]: result = func(*args, **kwargs) [rank15]: ^^^^^^^^^^^^^^^^^^^^^ [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank29]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: raise CheckpointingException(_msg) [rank29]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank29]: Traceback (most recent call last): (RANK 14) [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank29]: local_data = map_fun() [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank6]: raise CheckpointingException(_msg) [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank15]: local_plan = planner.create_local_plan() [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank15]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank15]: raise CheckpointingException(_msg) [rank15]: megatron.core.dist_checkpointing.core.CheckpointingExceptionr.create_local_plan() [rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: ^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank23]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank23]: raise CheckpointingException(_msg) [rank23]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank23]: Traceback (most recent call last): (RANK 15) [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank23]: local_data = map_fun() [rank23]: ^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank6]: 
megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank6]: Traceback (most recent call last): (RANK 8) [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank6]: local_data = map_fun() [rank6]: ^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank6]: result = func(*args, **kwargs) [rank6]: ^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/minicondtensors) [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank9]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank9]: raise CheckpointingException(_msg) [rank9]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank9]: Traceback (most recent call last): (RANK 25) [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank9]: local_data = map_fun() [rank9]: ^^^^^^^^^ [rank29]: result = func(*args, **kwargs) [rank29]: ^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank29]: local_plan = planner.create_local_plan() [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank29]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: raise CheckpointingException(_msg) [rank7]: raise CheckpointingException(_msg) [rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank7]: Traceback (most recent call last): (RANK 6) [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank7]: local_data = map_fun() [rank7]: ^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank7]: result = func(*args, **kwargs) [rank7]: ^^^^^^^^^^^^^^^^^^^^^ [rank7]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank9]: result = func(*args, **kwargs) [rank9]: ^^^^^^^^^^^^^^^^^^^^^ [rank29]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank29]: Traceback (most recent call last): (RANK 15) [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank29]: local_data = map_fun() [rank29]: ^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank29]: result = func(*args, **kwargs) [rank29]: ^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank29]: local_plan = planner.create_local_plan() [rank7]: local_plan = planner.create_local_plan() [rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank7]: raise CheckpointingException(_msg) [rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank7]: Traceback (most recent call last): (RANK 7) [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank9]: local_plan = planner.create_local_plan() [rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank9]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank9]: raise CheckpointingException(_msg) [rank9]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank9]: Traceback (most recent call last): (RANK 26) [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank29]: 
self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: raise CheckpointingException(_msg) [rank29]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank29]: Traceback (most recent call last): (RANK 16) [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank29]: local_data = map_fun() [rank23]: result = func(*args, **kwargs) [rank23]: ^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank23]: local_plan = planner.create_local_plan() [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank23]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank23]: raise CheckpointingException(_msg) [rank7]: local_data = map_fun() [rank7]: ^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank7]: result = func(*args, **kwargs) [rank7]: ^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank7]: local_plan = planner.create_local_plan() [rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank9]: local_data = map_fun() [rank9]: ^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank9]: result = func(*args, **kwargs) [rank9]: ^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank9]: local_plan = planner.create_local_plan() [rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank29]: ^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/384, 4096)) tensor for key embedding.position_embeddings.weight [rank30]: Traceback (most recent call last): (RANK 11) [rank30]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank30]: local_data = map_fun() [rank30]: ^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank30]: result = func(*args, **kwargs) [rank30]: ^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank23]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank23]: Traceback (most recent call last): (RANK 16) [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank23]: local_data = map_fun() [rank23]: ^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/384, 4096)) tensor for key embedding.position_embeddings.weight [rank18]: Traceback (most recent call last): (RANK 11) [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank18]: local_data = map_fun() [rank18]: ^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank7]: raise CheckpointingException(_msg) [rank9]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank9]: raise CheckpointingException(_msg) [rank9]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank9]: Traceback (most recent call last): (RANK 27) [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank9]: local_data = map_fun() [rank9]: ^^^t_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank11]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank30]: local_plan = planner.create_local_plan() [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank18]: result = func(*args, **kwargs) [rank18]: ^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank18]: local_plan = planner.create_local_plan() [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank18]: 
self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank18]: raise CheckpointingException(_msg) [rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank7]: Traceback (most recent call last): (RANK 8) [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank7]: local_data = map_fun() [rank7]: ^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank7]: result = func(*args, **kwargs) [rank7]: ^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/minicondn/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank2]: raise CheckpointingException(_msg) [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank11]: raise CheckpointingException(_msg) [rank11]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank11]: Traceback (most recent call last): (RANK 14) [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: local_data = map_fun() [rank11]: ^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank11]: result = func(*args, **kwargs) [rank11]: ^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank30]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank30]: raise CheckpointingException(_msg) [rank30]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank30]: Traceback (most recent call last): (RANK 12) [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank30]: local_data = map_fun() [rank30]: ^^^^^^^^^ [rank18]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank2]: Traceback (most recent 
call last): (RANK 14) [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank2]: local_data = map_fun() [rank2]: ^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank2]: result = func(*args, **kwargs) [rank2]: ^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank2]: local_plan = planner.create_local_plan() [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank11]: local_plan = planner.create_local_plan() [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank11]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank11]: raise CheckpointingException(_msg) [rank11]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank30]: result = func(*args, **kwargs) [rank30]: ^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank30]: local_plan = planner.create_local_plan() [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank30]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank30]: raise CheckpointingException(_msg) [rank18]: Traceback (most recent call last): (RANK 12) [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank18]: local_data = map_fun() [rank18]: ^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank18]: result = func(*args, **kwargs) [rank18]: ^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank18]: local_plan = planner.create_local_plan() [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, 
in create_local_plan [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank2]: raise CheckpointingException(_msg) [rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank2]: Traceback (most recent call last): (RANK 15) [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank2]: local_data = map_fun() [rank11]: Traceback (most recent call last): (RANK 15) [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: local_data = map_fun() [rank11]: ^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank11]: result = func(*args, **kwargs) [rank11]: ^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank11]: local_plan = planner.create_local_plan() [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank30]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank30]: Traceback (most recent call last): (RANK 13) [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank30]: local_data = map_fun() [rank30]: ^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank30]: result = func(*args, **kwargs) [rank30]: ^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank30]: local_plan = planner.create_local_plan() [rank18]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank18]: raise CheckpointingException(_msg) [rank18]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank18]: Traceback (most recent call last): (RANK 13) [rank18]: File 
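For context, the check that raises this exception compares each sharded tensor's expected global shape against the shape recorded in the checkpoint metadata. A minimal, self-contained sketch of that comparison, using the shapes from the error above (illustrative names only, not the actual Megatron-LM _validate_global_shapes implementation):

# Minimal sketch of a global-shape validation check; the real logic lives in
# megatron/core/dist_checkpointing/strategies/torch.py, names here are illustrative.
import torch

class CheckpointingException(Exception):
    pass

def validate_global_shapes(loaded_shapes, expected_shapes):
    # loaded_shapes: key -> shape stored in the checkpoint metadata
    # expected_shapes: key -> global shape the current model expects
    for key, expected in expected_shapes.items():
        loaded = loaded_shapes.get(key)
        if loaded is not None and tuple(loaded) != tuple(expected):
            raise CheckpointingException(
                f"Global shape mismatch for loaded ({loaded}) and "
                f"expected ({tuple(expected)}) tensor for key {key}"
            )

try:
    validate_global_shapes(
        {"embedding.position_embeddings.weight": torch.Size([49152, 4096])},
        {"embedding.position_embeddings.weight": (16384, 4096)},
    )
except CheckpointingException as e:
    print(e)  # reproduces the shape-mismatch message seen on every rank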
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank18]: local_data = map_fun() [rank18]: ^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank2]: ^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank2]: result = func(*args, **kwargs) [rank2]: ^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank2]: local_plan = planner.create_local_plan() [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank11]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank11]: raise CheckpointingException(_msg) [rank11]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank11]: Traceback (most recent call last): (RANK 16) [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: local_data = map_fun() [rank11]: ^^^^^^^^^ [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dis[rank31]: Traceback (most recent call last): [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank31]: pretrain( [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain [rank31]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( [rank31]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer [rank31]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( [rank31]: ^^^^^^^^^^^^^^^^ [rank18]: result = func(*args, **kwargs) [rank18]: ^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank18]: local_plan = planner.create_local_plan() [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dis 87, in wrapper [rank21]: result = func(*args, **kwargs) [rank21]: ^^^^^^^^^^^^^^^^^^^^^ [rank21]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank21]: local_plan = planner.create_local_plan() [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: raise CheckpointingException(_msg) [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint [rank31]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( [rank31]: ^^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint [rank31]: return _load_global_dist_base_checkpoint( [rank31]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint [rank31]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) [rank31]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank21]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank21]: raise CheckpointingException(_msg) [rank21]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank21]: Traceback (most recent call last): (RANK 9) [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank21]: local_data = map_fun() [rank21]: ^^^^^^^^^ [rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank2]: Traceback (most recent call last): (RANK 16) [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank2]: local_data = map_fun() [rank2]: ^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank2]: result = func(*args, **kwargs) [rank2]: ^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank4]: local_plan = planner.create_local_plan() [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load [rank31]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) [rank31]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load [rank31]: 
checkpoint.load_state_dict( [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper [rank31]: return arg(*args, **kwargs) [rank31]: ^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict [rank31]: return _load_state_dict( [rank31]: ^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank21]: result = func(*args, **kwargs) [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank15]: Traceback (most recent call last): (RANK 27) [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank15]: local_data = map_fun() [rank15]: ^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank15]: result = func(*args, **kwargs) [rank15]: ^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict [rank31]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) [rank31]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter [rank31]: raise result [rank31]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]) [rank31]: Traceback (most recent call last): (RANK 0) [rank21]: ^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank21]: local_plan = planner.create_local_plan() [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank21]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank21]: raise CheckpointingException(_msg) [rank21]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank4]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank4]: raise CheckpointingException(_msg) [rank4]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank4]: Traceback (most recent call last): (RANK 17) [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank4]: local_data = map_fun() [rank15]: local_plan = planner.create_local_plan() [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank31]: local_data = map_fun() [rank31]: ^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank31]: ret_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank28]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank28]: raise CheckpointingException(_msg) [rank21]: Traceback (most recent call last): (RANK 10) [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank21]: local_data = map_fun() [rank21]: ^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank21]: result = func(*args, **kwargs) [rank21]: ^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank21]: local_plan = planner.create_local_plan() [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank4]: ^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank4]: result = func(*args, **kwargs) [rank4]: ^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank4]: local_plan = planner.create_local_plan() [rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank4]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank15]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank15]: raise CheckpointingException(_msg) [rank15]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank15]: Traceback (most recent call last): (RANK 28) [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank15]: local_data = map_fun() [rank15]: ^^^^^^^^^ [rank28]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank28]: Traceback (most recent call last): (RANK 14) [rank21]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank21]: raise CheckpointingException(_msg) [rank21]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16 87, in wrapper [rank19]: result = func(*args, **kwargs) [rank19]: ^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank19]: local_plan = planner.create_local_plan() [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank4]: raise CheckpointingException(_msg) [rank4]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank4]: Traceback (most recent call last): (RANK 18) [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank4]: local_data = map_fun() [rank4]: ^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank4]: result = func(*args, **kwargs) [rank4]: ^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank15]: result = func(*args, **kwargs) [rank15]: ^^^^^^^^^^^^^^^^^^^^^ [rank15]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank15]: local_plan = planner.create_local_plan() [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank15]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank15]: raise CheckpointingException(_msg) [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank28]: local_data = map_fun() [rank28]: ^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank28]: result = func(*args, **kwargs) [rank28]: ^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank28]: local_plan = planner.create_local_plan() [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank19]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank19]: raise CheckpointingException(_msg) [rank19]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank19]: Traceback (most recent call last): (RANK 9) [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank19]: local_data = map_fun() [rank19]: ^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank4]: local_plan = planner.create_local_plan() [rank15]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank15]: Traceback (most recent call last): (RANK 29) [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank15]: local_data = map_fun() [rank15]: ^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank15]: result = func(*args, **kwargs) [rank15]: ^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank15]: 
local_plan = planner.create_local_plan() [rank28]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank28]: raise CheckpointingException(_msg) [rank28]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank28]: Traceback (most recent call last): (RANK 15) [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank28]: local_data = map_fun() [rank28]: ^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank19]: result = func(*args, **kwargs) [rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank4]: raise CheckpointingException(_msg) [rank4]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank4]: Traceback (most recent call last): (RANK 19) [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank9]: result = func(*args, **kwargs) [rank9]: ^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank9]: local_plan = planner.create_local_plan() [rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank9]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank28]: result = func(*args, **kwargs) [rank28]: ^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank28]: local_plan = planner.create_local_plan() [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank28]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank28]: raise CheckpointingException(_msg) [rank19]: ^^^^^^^^^^^^^^^^^^^^^ [rank19]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank19]: local_plan = planner.create_local_plan() [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank19]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank19]: raise CheckpointingException(_msg) [rank19]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in b/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank0]: local_data = map_fun() [rank0]: ^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank0]: result = func(*args, **kwargs) [rank0]: ^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank0]: local_plan = planner.create_local_plan() [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank28]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank28]: Traceback (most recent call last): (RANK 16) [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank28]: local_data = map_fun() [rank28]: ^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank29]: result = func(*args, **kwargs) [rank29]: ^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank29]: local_plan = planner.create_local_plan() [rank19]: Traceback (most recent call last): (RANK 10) [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank19]: local_data = map_fun() [rank19]: ^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank19]: result = func(*args, **kwargs) [rank19]: ^^^^^^^^^^^^^^^^^^^^^ [rank19]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank19]: local_plan = planner.create_local_plan() [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank0]: raise CheckpointingException(_msg) [rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank0]: Traceback (most recent call last): (RANK 12) [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank0]: local_data = map_fun() [rank0]: ^^^^^^^^^ [rank9]: raise CheckpointingException(_msg) [rank9]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank9]: Traceback (most recent call last): (RANK 28) [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank9]: local_data = map_fun() [rank9]: ^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank9]: result = func(*args, **kwargs) [rank9]: ^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank19]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank19]: raise CheckpointingException(_msg) [rank19]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16t_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank18]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank18]: raise CheckpointingException(_msg) [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank0]: result = func(*args, **kwargs) [rank0]: ^^^^^^^^^^^^^^^^^^^^^ 
[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank0]: local_plan = planner.create_local_plan() [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank0]: raise CheckpointingException(_msg) [rank9]: local_plan = planner.create_local_plan() [rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank9]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank9]: raise CheckpointingException(_msg) [rank9]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank9]: Traceback (most recent call last): (RANK 29) [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank29]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: raise CheckpointingException(_msg) [rank29]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank29]: Traceback (most recent call last): (RANK 17) [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank29]: local_data = map_fun() [rank29]: ^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank18]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank18]: Traceback (most recent call last): (RANK 14) [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank18]: local_data = map_fun() [rank18]: ^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank18]: result = func(*args, **kwargs) [rank18]: ^^^^^^^^^^^^^^^^^^^^^ [rank18]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank18]: local_plan = planner.create_local_plan() [rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank0]: Traceback (most recent call last): (RANK 13) [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank9]: local_data = map_fun() [rank9]: ^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank9]: result = func(*args, **kwargs) [rank9]: ^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank9]: local_plan = planner.create_local_plan() [rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank9]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank29]: result = func(*args, **kwargs) [rank29]: ^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank29]: local_plan = planner.create_local_plan() [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank29]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: raise CheckpointingException(_msg) [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: local_data = map_fun() [rank0]: ^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank0]: result = func(*args, **kwargs) [rank0]: ^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank0]: local_plan = planner.create_local_plan() [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank9]: raise CheckpointingException(_msg) [rank9]: megatron.core.distdistributed/checkpoint/logger.py", line 87, in wrapper [rank11]: result = func(*args, **kwargs) [rank11]: ^^^^^^^^^^^^^^^^^^^^^ [rank11]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank11]: local_plan = planner.create_local_plan() [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank11]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank29]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank29]: Traceback (most recent call last): (RANK 18) [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank29]: local_data = map_fun() [rank29]: ^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank29]: result = func(*args, **kwargs) [rank29]: ^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank29]: local_plan = planner.create_local_plan() [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank18]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank18]: raise CheckpointingException(_msg) [rank18]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank18]: Traceback (most recent call last): (RANK 15) [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank18]: local_data = map_fun() [rank18]: ^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatrob/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank1]: local_data = map_fun() [rank1]: ^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank1]: result = func(*args, **kwargs) [rank1]: ^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank1]: local_plan = planner.create_local_plan() [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank11]: raise CheckpointingException(_msg) [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ 
[rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank29]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: raise CheckpointingException(_msg) [rank29]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank25]: Traceback (most recent call last): (RANK 19) [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank25]: local_data = map_fun() [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank18]: result = func(*args, **kwargs) [rank18]: ^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank18]: local_plan = planner.create_local_plan() [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank18]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank18]: raise CheckpointingException(_msg) [rank1]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank1]: raise CheckpointingException(_msg) [rank1]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank1]: Traceback (most recent call last): (RANK 12) [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank1]: local_data = map_fun() [rank1]: ^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank11]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank11]: Traceback (most recent call last): (RANK 17) [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: local_data = map_fun() [rank11]: ^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank11]: result = func(*args, 
**kwargs) [rank11]: ^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank11]: local_plan = planner.create_local_plan() [rank25]: ^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank18]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank18]: Traceback (most recent call last): (RANK 16) [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank18]: local_data = map_fun() [rank18]: ^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/ 87, in wrapper [rank17]: result = func(*args, **kwargs) [rank17]: ^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank17]: local_plan = planner.create_local_plan() [rank1]: result = func(*args, **kwargs) [rank1]: ^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank1]: local_plan = planner.create_local_plan() [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank1]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank1]: raise CheckpointingException(_msg) [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank11]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank11]: raise CheckpointingException(_msg) [rank11]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank11]: Traceback (most recent call last): (RANK 18) [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: local_data = map_fun() [rank25]: result = func(*args, **kwargs) [rank25]: ^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank25]: local_plan = planner.create_local_plan() [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank25]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank25]: raise CheckpointingException(_msg) [rank17]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank17]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank17]: raise CheckpointingException(_msg) [rank17]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank17]: Traceback (most recent call last): (RANK 9) [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank17]: local_data = map_fun() [rank1]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank1]: Traceback (most recent call last): (RANK 13) [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: ^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank11]: result = func(*args, **kwargs) [rank11]: ^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank11]: local_plan = planner.create_local_plan() [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank11]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank25]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank25]: Traceback (most recent call last): (RANK 20) [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank25]: local_data = map_fun() [rank25]: ^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank25]: result = func(*args, **kwargs) [rank25]: ^^^^^^^^^^^^^^^^^^^^^ [rank25]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank25]: local_plan = planner.create_local_plan() [rank17]: ^^^^^^^^^ [rank1]: local_data = map_fun() [rank1]: ^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank1]: result = func(*args, **kwargs) [rank1]: ^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank1]: local_plan = planner.create_local_plan() [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank1]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: raise CheckpointingException(_msg) [rank11]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torc5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank15]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank15]: raise CheckpointingException(_msg) [rank15]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank15]: Traceback (most recent call last): (RANK 30) [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank25]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank25]: raise CheckpointingException(_msg) [rank25]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank25]: Traceback (most recent call last): (RANK 21) [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank25]: local_data = map_fun() [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank17]: result = func(*args, **kwargs) [rank17]: ^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank17]: local_plan = planner.create_local_plan() [rank17]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan 
[rank17]: Traceback (most recent call last):
[rank17]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter
[rank17]:     local_data = map_fun()
[rank17]:                  ^^^^^^^^^
[rank17]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper
[rank17]:     result = func(*args, **kwargs)
[rank17]:              ^^^^^^^^^^^^^^^^^^^^^
[rank17]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step
[rank17]:     local_plan = planner.create_local_plan()
[rank17]:                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^
[rank17]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan
[rank17]:     self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors)
[rank17]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes
[rank17]:     raise CheckpointingException(_msg)
[rank17]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight
The other ranks (rank 1 through rank 31 appear in the aggregated error report) raised the identical traceback and CheckpointingException for key embedding.position_embeddings.weight.
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank28]: result = func(*args, **kwargs) [rank28]: ^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank28]: local_plan = planner.create_local_plan() [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank28]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank19]: result = func(*args, **kwargs) [rank19]: ^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank19]: local_plan = planner.create_local_plan() [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank19]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank19]: raise CheckpointingException(_msg) [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank4]: local_data = map_fun() [rank4]: ^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank4]: result = func(*args, **kwargs) [rank4]: ^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank4]: local_plan = planner.create_local_plan() [rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank13]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank13]: raise CheckpointingException(_msg) [rank13]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank13]: Traceback (most recent call last): (RANK 12) [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank13]: local_data = map_fun() [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in 
_validate_global_shapes [rank28]: raise CheckpointingException(_msg) [rank28]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank28]: Traceback (most recent call last): (RANK 18) [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank28]: local_data = map_fun() [rank28]: ^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank28]: result = func(*args, **kwargs) [rank28]: ^^^^^^^^^^^^^^^^^^^^^ [rank19]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank19]: Traceback (most recent call last): (RANK 13) [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank19]: local_data = map_fun() [rank19]: ^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank19]: result = func(*args, **kwargs) [rank19]: ^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank19]: local_plan = planner.create_local_plan() [rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shn/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank0]: raise CheckpointingException(_msg) [rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank0]: Traceback (most recent call last): (RANK 14) [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank0]: local_data = map_fun() [rank0]: ^^^^^^^^^ [rank13]: ^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank13]: result = func(*args, **kwargs) [rank13]: ^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank13]: local_plan = planner.create_local_plan() [rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank13]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank13]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank28]: local_plan = planner.create_local_plan() [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank28]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank28]: raise CheckpointingException(_msg) [rank28]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torcdistributed/checkpoint/logger.py", line 87, in wrapper [rank24]: result = func(*args, **kwargs) [rank24]: ^^^^^^^^^^^^^^^^^^^^^ [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/disdistributed/checkpoint/logger.py", line 87, in wrapper [rank23]: result = func(*args, **kwargs) [rank23]: ^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank23]: local_plan = planner.create_local_plan() [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank23]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank0]: result = func(*args, **kwargs) [rank0]: ^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank0]: local_plan = planner.create_local_plan() [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank0]: raise CheckpointingException(_msg) [rank13]: raise CheckpointingException(_msg) [rank13]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank13]: Traceback (most recent call last): (RANK 13) [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank13]: local_data = map_fun() [rank13]: ^^^^^^^^^ [rank13]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank13]: result = func(*args, **kwargs) [rank13]: ^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank23]: raise CheckpointingException(_msg) [rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank0]: Traceback (most recent call last): (RANK 15) [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank0]: local_data = map_fun() [rank0]: ^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank0]: result = func(*args, **kwargs) [rank0]: ^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank0]: local_plan = planner.create_local_plan() [rank13]: local_plan = planner.create_local_plan() [rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/disrver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank11]: result = func(*args, **kwargs) [rank11]: ^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank11]: local_plan = planner.create_local_plan() [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank11]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank24]: local_plan = planner.create_local_plan() [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank24]: raise CheckpointingException(_msg) [rank24]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank24]: Traceback (most recent call last): (RANK 17) [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank23]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for 
loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank23]: Traceback (most recent call last): (RANK 17) [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank23]: local_data = map_fun() [rank23]: ^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank23]: result = func(*args, **kwargs) [rank23]: ^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank23]: local_plan = planner.create_local_plan() [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank11]: raise CheckpointingException(_msg) [rank11]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank11]: Traceback (most recent call last): (RANK 25) [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: local_data = map_fun() [rank11]: ^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank11]: result = func(*args, **kwargs) [rank11]: ^^^^^^^^^^^^^^^^^^^^^ [rank24]: local_data = map_fun() [rank24]: ^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: result = func(*args, **kwargs) [rank24]: ^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank24]: local_plan = planner.create_local_plan() [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank23]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank23]: raise CheckpointingException(_msg) [rank23]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank23]: Traceback (most 
recent call last): (RANK 18) [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank23]: local_data = map_fun() [rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank0]: raise CheckpointingException(_msg) [rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank0]: Traceback (most recent call last): (RANK 16) [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank0]: local_data = map_fun() [rank0]: ^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank11]: local_plan = planner.create_local_plan() [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank11]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank11]: raise CheckpointingException(_msg) [rank11]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank24]: raise CheckpointingException(_msg) [rank24]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank24]: Traceback (most recent call last): (RANK 18) [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank24]: local_data = map_fun() [rank24]: ^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: result = func(*args, **kwargs) [rank24]: ^^^^^^^^^^^^^^^^^^^^^ [rank23]: ^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank23]: result = func(*args, **kwargs) [rank23]: ^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank23]: local_plan = 
planner.create_local_plan() [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank23]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank0]: result = func(*args, **kwargs) [rank0]: ^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpointn/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank1]: raise CheckpointingException(_msg) [rank1]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank1]: Traceback (most recent call last): (RANK 14) [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank1]: local_data = map_fun() [rank1]: ^^^^^^^^^ [rank11]: Traceback (most recent call last): (RANK 26) [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: local_data = map_fun() [rank11]: ^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank11]: result = func(*args, **kwargs) [rank11]: ^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank11]: local_plan = planner.create_local_plan() [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank24]: local_plan = planner.create_local_plan() [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank24]: raise CheckpointingException(_msg) [rank24]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torcda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank23]: raise CheckpointingException(_msg) [rank23]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank16]: Traceback (most recent call last): (RANK 19) [rank16]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank16]: local_data = map_fun() [rank16]: ^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank16]: result = func(*args, **kwargs) [rank16]: ^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank1]: result = func(*args, **kwargs) [rank1]: ^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank1]: local_plan = planner.create_local_plan() [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank1]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank1]: raise CheckpointingException(_msg) [rank11]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank11]: raise CheckpointingException(_msg) [rank11]: megatron.core.dist_checkpointing.core.CheckpointingExceptiont_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank13]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank13]: raise CheckpointingException(_msg) [rank13]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank25]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank16]: local_plan = planner.create_local_plan() [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank16]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank16]: raise CheckpointingException(_msg) [rank16]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank16]: Traceback (most recent call last): (RANK 20) [rank16]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank1]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank1]: Traceback (most recent call last): (RANK 15) [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank1]: local_data = map_fun() [rank1]: ^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank1]: result = func(*args, **kwargs) [rank1]: ^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank1]: local_plan = planner.create_local_plan() [rank13]: Traceback (most recent call last): (RANK 14) [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank13]: local_data = map_fun() [rank13]: ^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank13]: result = func(*args, **kwargs) [rank13]: ^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank13]: local_plan = planner.create_local_plan() [rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank25]: raise CheckpointingException(_msg) [rank25]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank25]: Traceback (most recent call last): (RANK 22) [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank25]: local_data = map_fun() [rank25]: ^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank25]: result = func(*args, **kwargs) [rank25]: ^^^^^^^^^^^^^^^^^^^^^ [rank16]: local_data = map_fun() [rank16]: ^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank16]: result = func(*args, **kwargs) [rank16]: ^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank16]: local_plan = planner.create_local_plan() 
[rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank16]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank13]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank25]: local_plan = planner.create_local_plan() [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank25]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank25]: raise CheckpointingException(_msg) [rank25]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank16]: raise CheckpointingException(_msg) [rank16]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank16]: Traceback (most recent call last): (RANK 21) [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank16]: local_data = map_fun() [rank16]: ^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank16]: result = func(*args, **kwargs) [rank16]: ^^^^^^^^^^^^^^^^^^^^^ [rank1]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank1]: raise CheckpointingException(_msg) [rank1]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank1]: Traceback (most recent call last): (RANK 16) [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank1]: local_data = map_fun() [rank1]: ^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in 
_validate_global_shapes [rank13]: raise CheckpointingException(_msg) [rank13]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank13]: Traceback (most recent call last): (RANK 15) [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank13]: local_data = map_fun() [rank13]: ^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank13]: result = func(*args, **kwargs) [rank13]: ^^^^^^^^^^^^^^^^^^^^^ [rank25]: Traceback (most recent call last): (RANK 23) [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank25]: local_data = map_fun() [rank25]: ^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank25]: result = func(*args, **kwargs) [rank25]: ^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank25]: local_plan = planner.create_local_plan() [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank16]: local_plan = planner.create_local_plan() [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junt_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank19]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank19]: raise CheckpointingException(_msg) [rank19]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank19]: Traceback (most recent call last): (RANK 14) [rank1]: result = func(*args, **kwargs) [rank1]: ^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpointn/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank5]: raise CheckpointingException(_msg) [rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank5]: Traceback (most recent call last): (RANK 14) [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank5]: local_data = map_fun() [rank5]: ^^^^^^^^^ [rank13]: 
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank13]: local_plan = planner.create_local_plan() [rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank13]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank13]: raise CheckpointingException(_msg) [rank13]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank25]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank25]: raise CheckpointingException(_msg) [rank25]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank25]: Traceback (most recent call last): (RANK 24) [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank25]: local_data = map_fun() [rank25]: ^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnseing.position_embeddings.weight [rank31]: Traceback (most recent call last): (RANK 3) [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank5]: result = func(*args, **kwargs) [rank5]: ^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank5]: local_plan = planner.create_local_plan() [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank5]: raise CheckpointingException(_msg) [rank13]: Traceback (most recent call last): (RANK 16) [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank13]: local_data = map_fun() [rank13]: ^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key 
embedding.position_embeddings.weight [rank11]: Traceback (most recent call last): (RANK 27) [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: local_data = map_fun() [rank11]: ^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank31]: local_data = map_fun() [rank31]: ^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank31]: result = func(*args, **kwargs) [rank31]: ^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank31]: local_plan = planner.create_local_plan() [rank31]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank19]: local_data = map_fun() [rank19]: ^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank19]: result = func(*args, **kwargs) [rank19]: ^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank19]: local_plan = planner.create_local_plan() [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank19]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank5]: Traceback (most recent call last): (RANK 15) [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank5]: local_data = map_fun() [rank5]: ^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank5]: result = func(*args, **kwargs) [rank5]: ^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank5]: local_plan = planner.create_local_plan() [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank11]: result = func(*args, **kwargs) [rank11]: ^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank11]: local_plan = planner.create_local_plan() [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank11]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank11]: raise CheckpointingException(_msg) [rank31]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank31]: raise CheckpointingException(_msg) [rank31]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank31]: Traceback (most recent call last): (RANK 4) [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank31]: local_data = map_fun() [rank31]: ^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank19]: raise CheckpointingException(_msg) [rank19]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank19]: Traceback (most recent call last): (RANK 15) [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank19]: local_data = map_fun() [rank19]: ^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank19]: result = func(*args, **kwargs) [rank19]: ^^^^^^^^^^^^^^^^^^^^^ [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank11]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank31]: result = func(*args, **kwargs) [rank31]: ^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank31]: local_plan = planner.create_local_plan() [rank31]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank31]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank31]: raise CheckpointingException(_msg) [rank19]: 
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank19]: local_plan = planner.create_local_plan() [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank19]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank19]: raise CheckpointingException(_msg) [rank19]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank5]: raise CheckpointingException(_msg) [rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank5]: Traceback (most recent call last): (RANK 16) [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank5]: local_data = map_fun() [rank5]: ^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank11]: Traceback (most recent call last): (RANK 28) [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: local_data = map_fun() [rank11]: ^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank11]: result = func(*args, **kwargs) [rank11]: ^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank11]: local_plan = planner.create_local_plan() [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank31]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank31]: Traceback (most recent call last): (RANK 5) [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank31]: local_data = map_fun() [rank31]: ^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank31]: result = func(*args, **kwargs) [rank31]: 
^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank31]: local_plan = planner.create_local_plan() [rank19]: Traceback (most recent call last): (RANK 16) [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank19]: local_data = map_fun() [rank19]: ^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/h.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank18]: Traceback (most recent call last): (RANK 19) [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank18]: local_data = map_fun() [rank18]: ^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank5]: result = func(*args, **kwargs) [rank5]: ^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpointb/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank6]: local_data = map_fun() [rank6]: ^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank6]: result = func(*args, **kwargs) [rank6]: ^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank6]: local_plan = planner.create_local_plan() [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank11]: raise CheckpointingException(_msg) [rank11]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank11]: Traceback (most recent call last): (RANK 29) [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: local_data = map_fun() [rank11]: ^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank31]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: result = func(*args, **kwargs) [rank18]: ^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank6]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes 
[rank6]: raise CheckpointingException(_msg) [rank6]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank6]: Traceback (most recent call last): (RANK 12) [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank6]: local_data = map_fun() [rank6]: ^^^^^^^^^ [rank11]: result = func(*args, **kwargs) [rank11]: ^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank11]: local_plan = planner.create_local_plan() [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1distributed/checkpoint/logger.py", line 87, in wrapper [rank13]: result = func(*args, **kwargs) [rank13]: ^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank13]: local_plan = planner.create_local_plan() [rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py"h.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank29]: Traceback (most recent call last): (RANK 19) [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank29]: local_data = map_fun() [rank29]: ^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank29]: result = func(*args, **kwargs) [rank29]: ^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank18]: local_plan = planner.create_local_plan() [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank18]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank18]: raise CheckpointingException(_msg) [rank18]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank6]: result = func(*args, **kwargs) [rank6]: ^^^^^^^^^^^^^^^^^^^^^ 
[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank6]: local_plan = planner.create_local_plan() [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank6]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank6]: raise CheckpointingException(_msg) [rank13]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank13]: raise CheckpointingException(_msg) [rank13]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank13]: Traceback (most recent call last): (RANK 17) [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank13]: local_data = map_fun() [rank13]: ^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank29]: local_plan = planner.create_local_plan() [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank29]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: raise CheckpointingException(_msg) [rank29]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank29]: Traceback (most recent call last): (RANK 20) [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank18]: Traceback (most recent call last): (RANK 20) [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank18]: local_data = map_fun() [rank18]: ^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank18]: result = func(*args, **kwargs) [rank18]: ^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank18]: local_plan = planner.create_local_plan() [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank6]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank6]: Traceback (most recent call last): (RANK 13) [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank6]: local_data = map_fun() [rank6]: ^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank6]: result = func(*args, **kwargs) [rank6]: ^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank6]: local_plan = planner.create_local_plan() [rank13]: result = func(*args, **kwargs) [rank29]: local_data = map_fun() [rank29]: ^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank29]: result = func(*args, **kwargs) [rank29]: ^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank29]: local_plan = planner.create_local_plan() [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank29]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank18]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank18]: raise CheckpointingException(_msg) [rank18]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank18]: Traceback (most recent call last): (RANK 21) [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank18]: local_data = map_fun() [rank18]: ^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank6]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatrob/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank7]: local_data = map_fun() [rank7]: ^^^^^^^^^ [rank7]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank7]: result = func(*args, **kwargs) [rank7]: ^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank13]: ^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank13]: local_plan = planner.create_local_plan() [rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank13]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank13]: raise CheckpointingException(_msg) [rank13]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: raise CheckpointingException(_msg) [rank29]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank29]: Traceback (most recent call last): (RANK 21) [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank29]: local_data = map_fun() [rank29]: ^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank29]: result = func(*args, **kwargs) [rank29]: ^^^^^^^^^^^^^^^^^^^^^ [rank18]: result = func(*args, **kwargs) [rank18]: ^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank18]: local_plan = planner.create_local_plan() [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank16]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank16]: raise CheckpointingException(_msg) [rank7]: local_plan = planner.create_local_plan() [rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank7]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank7]: raise CheckpointingException(_msg) [rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank7]: Traceback (most recent call last): (RANK 12) [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank13]: Traceback (most recent call last): (RANK 18) [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank13]: local_data = map_fun() [rank13]: ^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank13]: result = func(*args, **kwargs) [rank13]: ^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank13]: local_plan = planner.create_local_plan() [rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank16]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank7]: local_data = map_fun() [rank7]: ^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank7]: result = func(*args, **kwargs) [rank7]: ^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank7]: local_plan = planner.create_local_plan() [rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank13]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank13]: raise CheckpointingException(_msg) [rank13]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torc1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank11]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank11]: raise CheckpointingException(_msg) [rank29]: local_plan = planner.create_local_plan() [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/jundistributed/checkpoint/logger.py", line 87, in wrapper [rank30]: result = func(*args, **kwargs) [rank30]: ^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank30]: local_plan = planner.create_local_plan() [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank30]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank16]: Traceback (most recent call last): (RANK 22) [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank16]: local_data = map_fun() [rank16]: ^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank16]: result = func(*args, **kwargs) [rank16]: ^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank16]: local_plan = planner.create_local_plan() [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank7]: raise CheckpointingException(_msg) [rank30]: raise CheckpointingException(_msg) [rank30]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank30]: Traceback (most recent call last): (RANK 17) [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank30]: local_data = map_fun() [rank30]: ^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank30]: result = func(*args, **kwargs) [rank30]: ^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank16]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank16]: raise CheckpointingException(_msg) 
[rank16]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank16]: Traceback (most recent call last): (RANK 23) [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank16]: local_data = map_fun() [rank16]: ^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank7]: Traceback (most recent call last): (RANK 13) [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank7]: local_data = map_fun() [rank7]: ^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank7]: result = func(*args, **kwargs) [rank7]: ^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank7]: local_plan = planner.create_local_plan() [rank30]: local_plan = planner.create_local_plan() [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank30]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank30]: raise CheckpointingException(_msg) [rank30]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank30]: Traceback (most recent call last): (RANK 18) [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank16]: result = func(*args, **kwargs) [rank16]: ^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank16]: local_plan = planner.create_local_plan() [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank16]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank16]: raise CheckpointingException(_msg) [rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatroreduce_scatter [rank2]: local_data = map_fun() [rank2]: ^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank2]: result = func(*args, **kwargs) [rank2]: ^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank2]: local_plan = planner.create_local_plan() [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank11]: Traceback (most recent call last): (RANK 30) [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: local_data = map_fun() [rank11]: ^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank11]: result = func(*args, **kwargs) [rank11]: ^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank11]: local_plan = planner.create_local_plan() [rank30]: local_data = map_fun() [rank30]: ^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank30]: result = func(*args, **kwargs) [rank30]: ^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank30]: local_plan = planner.create_local_plan() [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank30]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank16]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank16]: Traceback (most recent call last): (RANK 24) [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank16]: local_data = map_fun() [rank16]: ^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnsedistributed/checkpoint/logger.py", line 87, in wrapper [rank19]: result = func(*args, **kwargs) [rank19]: ^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step 
[rank19]: local_plan = planner.create_local_plan() [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank2]: raise CheckpointingException(_msg) [rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank2]: Traceback (most recent call last): (RANK 20) [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank2]: local_data = map_fun() [rank2]: ^^^^^^^^^ [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank2]: result = func(*args, **kwargs) [rank2]: ^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank2]: local_plan = planner.create_local_plan() [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank2]: raise CheckpointingException(_msg) [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank11]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank11]: raise CheckpointingException(_msg) [rank11]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank11]: Traceback (most recent call last): (RANK 31) [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: local_data = map_fun() [rank11]: ^^^^^^^^^ [rank30]: raise CheckpointingException(_msg) [rank30]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torcrver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank25]: result = 
func(*args, **kwargs) [rank25]: ^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank25]: local_plan = planner.create_local_plan() [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank25]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank19]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank19]: raise CheckpointingException(_msg) [rank19]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank19]: Traceback (most recent call last): (RANK 17) [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank19]: local_data = map_fun() [rank19]: ^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank2]: Traceback (most recent call last): (RANK 21) [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank2]: local_data = map_fun() [rank2]: ^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank2]: result = func(*args, **kwargs) [rank2]: ^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank2]: local_plan = planner.create_local_plan() [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank11]: result = func(*args, **kwargs) [rank11]: ^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank11]: local_plan = planner.create_local_plan() [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank11]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank11]: raise CheckpointingException(_msg) [rank19]: result = func(*args, **kwargs) [rank19]: ^^^^^^^^^^^^^^^^^^^^^ [rank19]: 
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank19]: local_plan = planner.create_local_plan() [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank19]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank19]: raise CheckpointingException(_msg) [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank4]: raise CheckpointingException(_msg) [rank4]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank4]: Traceback (most recent call last): (RANK 22) [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank4]: local_data = map_fun() [rank11]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight h.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank13]: Traceback (most recent call last): (RANK 19) [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank13]: local_data = map_fun() [rank13]: ^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank13]: result = func(*args, **kwargs) [rank13]: ^^^^^^^^^^^^^^^^^^^^^ [rank19]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank19]: Traceback (most recent call last): (RANK 18) [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank19]: local_data = map_fun() [rank19]: ^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank19]: result = func(*args, **kwargs) [rank19]: ^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank19]: local_plan = planner.create_local_plan() [rank4]: ^^^^^^^^^ [rank4]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank4]: result = func(*args, **kwargs) [rank4]: ^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank4]: local_plan = planner.create_local_plan() [rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank13]: local_plan = planner.create_local_plan() [rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank13]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank13]: raise CheckpointingException(_msg) [rank13]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank25]: raise CheckpointingException(_msg) [rank25]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank25]: Traceback (most recent call last): (RANK 25) [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank25]: local_data = map_fun() [rank25]: ^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank25]: result = func(*args, **kwargs) [rank25]: ^^^^^^^^^^^^^^^^^^^^^ [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank19]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank19]: raise CheckpointingException(_msg) [rank19]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torct_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank22]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank22]: 
File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank22]: raise CheckpointingException(_msg) [rank4]: raise CheckpointingException(_msg) [rank4]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank4]: Traceback (most recent call last): (RANK 23) [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank4]: local_data = map_fun() [rank4]: ^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank4]: result = func(*args, **kwargs) [rank4]: ^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank13]: Traceback (most recent call last): (RANK 20) [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank25]: local_plan = planner.create_local_plan() [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank25]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank25]: raise CheckpointingException(_msg) [rank25]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank22]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank4]: local_plan = planner.create_local_plan() [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank13]: local_data = map_fun() [rank13]: ^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank13]: result = func(*args, **kwargs) [rank13]: ^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank13]: local_plan = planner.create_local_plan() [rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank25]: Traceback (most recent call last): (RANK 26) [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank25]: 
local_data = map_fun() [rank25]: ^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank25]: result = func(*args, **kwargs) [rank25]: ^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank25]: local_plan = planner.create_local_plan() [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank22]: Traceback (most recent call last): (RANK 14) [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank22]: local_data = map_fun() [rank22]: ^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank22]: result = func(*args, **kwargs) [rank22]: ^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank22]: local_plan = planner.create_local_plan() [rank22]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank4]: raise CheckpointingException(_msg) [rank4]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank4]: Traceback (most recent call last): (RANK 24) [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank4]: local_data = map_fun() [rank13]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank13]: raise CheckpointingException(_msg) [rank13]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank13]: Traceback (most recent call last): (RANK 21) [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank13]: local_data = map_fun() [rank13]: ^^^^^^^^^ [rank13]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank25]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank22]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank22]: raise CheckpointingException(_msg) [rank22]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank22]: Traceback (most recent call last): (RANK 15) [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank22]: local_data = map_fun() [rank22]: ^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank4]: ^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank4]: result = func(*args, **kwargs) [rank4]: ^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank4]: local_plan = planne/state_dict_loader.py", line 223, in local_step [rank0]: local_plan = planner.create_local_plan() [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank13]: result = func(*args, **kwargs) [rank13]: ^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank13]: local_plan = planner.create_local_plan() [rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank13]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank13]: raise CheckpointingException(_msg) [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank25]: raise CheckpointingException(_msg) [rank25]: megatron.core.dist_checkpointing.core.CheckpointingExceptionh.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank28]: Traceback (most recent call last): (RANK 19) [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank28]: local_data = map_fun() [rank28]: ^^^^^^^^^ [rank28]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank28]: result = func(*args, **kwargs) [rank28]: ^^^^^^^^^^^^^^^^^^^^^ [rank22]: result = func(*args, **kwargs) [rank22]: ^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank22]: local_plan = planner.create_local_plan() [rank22]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank22]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank22]: raise CheckpointingException(_msg) [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank0]: raise CheckpointingException(_msg) [rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank0]: Traceback (most recent call last): (RANK 17) [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank0]: local_data = map_fun() [rank0]: ^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank0]: result = func(*args, **kwargs) [rank0]: ^^^^^^^^^^^^^^^^^^^^^ [rank13]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank13]: Traceback (most recent call last): (RANK 22) [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank13]: local_data = map_fun() [rank13]: ^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank13]: result = func(*args, **kwargs) [rank13]: ^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank13]: local_plan = planner.create_local_plan() [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank28]: local_plan = planner.create_local_plan() [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank28]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank28]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank28]: raise CheckpointingException(_msg) [rank28]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank22]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank22]: Traceback (most recent call last): (RANK 16) [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank22]: local_data = map_fun() [rank22]: ^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/h.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank23]: Traceback (most recent call last): (RANK 19) [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank23]: local_data = map_fun() [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank0]: local_plan = planner.create_local_plan() [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank0]: raise CheckpointingException(_msg) [rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank0]: Traceback (most recent call last): (RANK 18) [rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank28]: Traceback (most recent call last): (RANK 20) [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank28]: local_data = map_fun() [rank28]: ^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank28]: result = func(*args, **kwargs) [rank28]: ^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank28]: local_plan = planner.create_local_plan() [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank23]: ^^^^^^^^^ [rank23]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank13]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank13]: raise CheckpointingException(_msg) [rank13]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank13]: Traceback (most recent call last): (RANK 23) [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank13]: local_data = map_fun() [rank13]: ^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank28]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank28]: raise CheckpointingException(_msg) [rank28]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank28]: Traceback (most recent call last): (RANK 21) [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank28]: local_data = map_fun() [rank28]: ^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank23]: result = func(*args, **kwargs) [rank23]: ^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank23]: local_plan = planner.create_local_plan() [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank23]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank23]: raise CheckpointingException(_msg) [rank0]: local_data = map_fun() [rank0]: ^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank0]: result = func(*args, **kwargs) [rank0]: ^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank0]: local_plan = 
planner.create_local_plan() [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank13]: result = func(*args, **kwargs) [rank13]: ^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank13]: local_plan = planner.create_local_plan() [rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank13]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank13]: raise CheckpointingException(_msg) [rank28]: result = func(*args, **kwargs) [rank28]: ^^^^^^^^^^^^^^^^^^^^^ [rank23]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank23]: Traceback (most recent call last): (RANK 20) [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank23]: local_data = map_fun() [rank23]: ^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank23]: result = func(*args, **kwargs) [rank23]: ^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank23]: local_plan = planner.create_local_plan() [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank0]: raise CheckpointingException(_msg) [rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank0]: Traceback (most recent call last): (RANK 19) [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in /state_dict_loader.py", line 223, in local_step [rank1]: local_plan = planner.create_local_plan() [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank13]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank13]: Traceback (most recent call last): (RANK 24) [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank13]: local_data = map_fun() 
[rank13]: ^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank13]: result = func(*args, **kwargs) [rank13]: ^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank13]: local_plan = planner.create_local_plan() [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank28]: local_plan = planner.create_local_plan() [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junh.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank30]: Traceback (most recent call last): (RANK 19) [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank30]: local_data = map_fun() [rank30]: ^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank30]: result = func(*args, **kwargs) [rank30]: ^^^^^^^^^^^^^^^^^^^^^ [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank23]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank23]: raise CheckpointingException(_msg) [rank23]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank23]: Traceback (most recent call last): (RANK 21) [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank23]: local_data = map_fun() [rank1]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank1]: raise CheckpointingException(_msg) [rank1]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank1]: Traceback (most recent call last): (RANK 17) [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank1]: local_data = map_fun() [rank1]: ^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in 
create_local_plan [rank13]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank13]: raise CheckpointingException(_msg) [rank13]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank13]: Traceback (most recent call last): (RANK 25) [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank13]: local_data = map_fun() [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank30]: local_plan = planner.create_local_plan() [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank30]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank30]: raise CheckpointingException(_msg) [rank30]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank23]: ^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank23]: result = func(*args, **kwargs) [rank23]: ^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank23]: local_plan = planner.create_local_plan() [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junt_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank17]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank17]: raise CheckpointingException(_msg) [rank1]: result = func(*args, **kwargs) [rank1]: ^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank1]: local_plan = planner.create_local_plan() [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank1]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank1]: raise CheckpointingException(_msg) [rank13]: ^^^^^^^^^ [rank13]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank30]: Traceback (most recent call last): (RANK 20) [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank30]: local_data = map_fun() [rank30]: ^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank30]: result = func(*args, **kwargs) [rank30]: ^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank30]: local_plan = planner.create_local_plan() [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank17]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank17]: Traceback (most recent call last): (RANK 14) [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank17]: local_data = map_fun() [rank17]: ^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank17]: result = func(*args, **kwargs) [rank17]: ^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank17]: local_plan = planner.create_local_plan() [rank1]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank1]: Traceback (most recent call last): (RANK 18) [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank13]: result = func(*args, **kwargs) [rank13]: ^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank13]: local_plan = planner.create_local_plan() [rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank13]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank13]: raise CheckpointingException(_msg) [rank30]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in 
_validate_global_shapes [rank30]: raise CheckpointingException(_msg) [rank30]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank30]: Traceback (most recent call last): (RANK 21) [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank30]: local_data = map_fun() [rank30]: ^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank17]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank17]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank17]: raise CheckpointingException(_msg) [rank17]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank17]: Traceback (most recent call last): (RANK 15) [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank17]: local_data = map_fun() [rank1]: local_data = map_fun() [rank1]: ^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank1]: result = func(*args, **kwargs) [rank1]: ^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank1]: local_plan = planner.create_local_plan() [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank1]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank13]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank13]: Traceback (most recent call last): (RANK 26) [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank13]: local_data = map_fun() [rank13]: ^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank13]: result = func(*args, **kwargs) [rank13]: ^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank13]: local_plan = planner.create_local_plan() [rank30]: result = func(*args, **kwargs) [rank30]: 
^^^^^^^^^^^^^^^^^^^^^ [rank17]: ^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank17]: result = func(*args, **kwargs) [rank17]: ^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank17]: local_plan = planner.create_local_plan() [rank17]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank17]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank1]: raise CheckpointingException(_msg) [rank1]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank1]: Traceback (most recent call last): (RANK 19) [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in /state_dict_loader.py", line 223, in local_step [rank5]: local_plan = planner.create_local_plan() [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank13]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank13]: raise CheckpointingException(_msg) [rank13]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank13]: Traceback (most recent call last): (RANK 27) [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank13]: local_data = map_fun() [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank30]: local_plan = planner.create_local_plan() [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/jun: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank25]: Traceback (most recent call last): (RANK 27) [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank25]: 
local_data = map_fun() [rank25]: ^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank25]: result = func(*args, **kwargs) [rank17]: raise CheckpointingException(_msg) [rank17]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank17]: Traceback (most recent call last): (RANK 16) [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank17]: local_data = map_fun() [rank17]: ^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/t_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank21]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank5]: raise CheckpointingException(_msg) [rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank5]: Traceback (most recent call last): (RANK 17) [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank5]: local_data = map_fun() [rank5]: ^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank13]: ^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank13]: result = func(*args, **kwargs) [rank13]: ^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank13]: local_plan = planner.create_local_plan() [rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank13]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank25]: ^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank25]: local_plan = planner.create_local_plan() [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank25]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank25]: raise CheckpointingException(_msg) [rank25]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank21]: raise CheckpointingException(_msg) [rank5]: result = func(*args, **kwargs) [rank5]: ^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank5]: local_plan = planner.create_local_plan() [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank5]: raise CheckpointingException(_msg) [rank13]: raise CheckpointingException(_msg) [rank25]: Traceback (most recent call last): (RANK 28) [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank25]: local_data = map_fun() [rank25]: ^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank25]: result = func(*args, **kwargs) [rank25]: ^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank25]: local_plan = planner.create_local_plan() [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank21]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank21]: Traceback (most recent call last): (RANK 14) [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank21]: local_data = map_fun() [rank21]: ^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank21]: result = func(*args, **kwargs) [rank21]: ^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank21]: local_plan = planner.create_local_plan() [rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for 
loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank5]: Traceback (most recent call last): (RANK 18) [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank13]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank13]: Traceback (most recent call last): (RANK 28) [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank13]: local_data = map_fun() [rank13]: ^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank13]: result = func(*args, **kwargs) [rank13]: ^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank13]: local_plan = planner.create_local_plan() [rank25]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank25]: raise CheckpointingException(_msg) [rank25]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank25]: Traceback (most recent call last): (RANK 29) [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank25]: local_data = map_fun() [rank25]: ^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank21]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank21]: raise CheckpointingException(_msg) [rank21]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank21]: Traceback (most recent call last): (RANK 15) [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank21]: local_data = map_fun() [rank5]: local_data = map_fun() [rank5]: ^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank5]: result = func(*args, **kwargs) [rank5]: ^^^^^^^^^^^^^^^^^^^^^ [rank5]: 
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank5]: local_plan = planner.create_local_plan() [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank13]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank13]: raise CheckpointingException(_msg) [rank13]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank13]: Traceback (most recent call last): (RANK 29) [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank13]: local_data = map_fun() [rank25]: result = func(*args, **kwargs) [rank21]: ^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank21]: result = func(*args, **kwargs) [rank21]: ^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank21]: local_plan = planner.create_local_plan() [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank21]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank13]: ^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank13]: result = func(*args, **kwargs) [rank13]: ^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank13]: local_plan = planner.create_local_plan() [rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1 87, in wrapper [rank12]: result = func(*args, **kwargs) [rank12]: ^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank12]: local_plan = planner.create_local_plan() [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: ^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 
223, in local_step [rank25]: local_plan = planner.create_local_plan() [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2da/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank29]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: raise CheckpointingException(_msg) [rank29]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank21]: raise CheckpointingException(_msg) [rank21]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank21]: Traceback (most recent call last): (RANK 16) [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank21]: local_data = map_fun() [rank21]: ^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/da/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank18]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank5]: raise CheckpointingException(_msg) [rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank5]: Traceback (most recent call last): (RANK 19) [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in n/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank6]: raise CheckpointingException(_msg) [rank6]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank12]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank12]: raise CheckpointingException(_msg) [rank12]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank12]: Traceback (most recent call last): (RANK 9) [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank12]: local_data = map_fun() [rank12]: ^^^^^^^^^ [rank29]: Traceback (most recent call 
last): (RANK 22) [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank29]: local_data = map_fun() [rank29]: ^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank29]: result = func(*args, **kwargs) [rank29]: ^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank29]: local_plan = planner.create_local_plan() [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank6]: Traceback (most recent call last): (RANK 14) [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank6]: local_data = map_fun() [rank6]: ^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank6]: result = func(*args, **kwargs) [rank6]: ^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank6]: local_plan = planner.create_local_plan() [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank12]: result = func(*args, **kwargs) [rank12]: ^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank12]: local_plan = planner.create_local_plan() [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank12]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank12]: raise CheckpointingException(_msg) [rank29]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: raise CheckpointingException(_msg) [rank29]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank29]: Traceback (most recent call last): (RANK 23) 
[rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank29]: local_data = map_fun() [rank29]: ^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank18]: raise CheckpointingException(_msg) [rank18]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank18]: Traceback (most recent call last): (RANK 22) [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank18]: local_data = map_fun() [rank18]: ^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank18]: result = func(*args, **kwargs) [rank18]: ^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank6]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank6]: raise CheckpointingException(_msg) [rank6]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank6]: Traceback (most recent call last): (RANK 15) [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank6]: local_data = map_fun() [rank6]: ^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank12]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank12]: Traceback (most recent call last): (RANK 10) [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank12]: local_data = map_fun() [rank12]: ^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank12]: result = func(*args, **kwargs) [rank12]: ^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank12]: local_plan = planner.create_local_plan() [rank29]: result = func(*args, **kwargs) [rank29]: ^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step 
[rank10]: Traceback (most recent call last):
[rank10]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
[rank10]:     pretrain(
[rank10]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain
[rank10]:     model, optimizer, opt_param_scheduler = setup_model_and_optimizer(
[rank10]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer
[rank10]:     args.iteration, args.num_floating_point_operations_so_far = load_checkpoint(
[rank10]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint
[rank10]:     state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint(
[rank10]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint
[rank10]:     return _load_global_dist_base_checkpoint(
[rank10]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint
[rank10]:     state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness)
[rank10]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load
[rank10]:     loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir)
[rank10]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load
[rank10]:     checkpoint.load_state_dict(
[rank10]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
[rank10]:     return arg(*args, **kwargs)
[rank10]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict
[rank10]:     return _load_state_dict(
[rank10]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict
[rank10]:     central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step)
[rank10]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter
[rank10]:     raise result
[rank10]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31])
[rank10]: Traceback (most recent call last): (RANK 0)
[rank10]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter
[rank10]:     local_data = map_fun()
[rank10]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper
[rank10]:     result = func(*args, **kwargs)
[rank10]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step
[rank10]:     local_plan = planner.create_local_plan()
[rank10]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan
[rank10]:     self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors)
[rank10]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes
[rank10]:     raise CheckpointingException(_msg)
[rank10]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight

(The aggregated CheckpointException embeds one such nested traceback per rank, RANK 0 through RANK 31, and ranks 0-31 each emit their own interleaved copy; every copy ends in the same CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight.)
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank30]: local_plan = planner.create_local_plan() [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank17]: Traceback (most recent call last): (RANK 19) [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank17]: local_data = map_fun() [rank17]: ^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank17]: result = func(*args, **kwargs) [rank17]: ^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank17]: local_plan = planner.create_local_plan() [rank17]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank1]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank1]: Traceback (most recent call last): (RANK 20) [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank1]: local_data = map_fun() [rank1]: ^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank1]: result = func(*args, **kwargs) [rank1]: ^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank1]: local_plan = planner.create_local_plan() [rank12]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank12]: Traceback (most recent call last): (RANK 16) [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank12]: local_data = map_fun() [rank12]: ^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/ing.position_embeddings.weight [rank10]: Traceback (most recent call last): (RANK 3) [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank10]: local_data = map_fun() [rank10]: ^^^^^^^^^ [rank30]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank30]: raise CheckpointingException(_msg) 
[rank30]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank30]: Traceback (most recent call last): (RANK 26) [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank30]: local_data = map_fun() [rank30]: ^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank17]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank17]: raise CheckpointingException(_msg) [rank17]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank17]: Traceback (most recent call last): (RANK 20) [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank17]: local_data = map_fun() [rank17]: ^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank10]: result = func(*args, **kwargs) [rank10]: ^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank10]: local_plan = planner.create_local_plan() [rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank10]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank10]: raise CheckpointingException(_msg) [rank30]: result = func(*args, **kwargs) [rank30]: ^^^^^^^^^^^^^^^^^^^^^ [rank17]: result = func(*args, **kwargs) [rank17]: ^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank17]: local_plan = planner.create_local_plan() [rank17]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank17]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank17]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank17]: raise CheckpointingException(_msg) [rank1]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank1]: raise CheckpointingException(_msg) [rank1]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank1]: Traceback (most recent call last): (RANK 21) [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank1]: local_data = map_fun() [rank1]: ^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank10]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank10]: Traceback (most recent call last): (RANK 4) [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank10]: local_data = map_fun() [rank10]: ^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank10]: result = func(*args, **kwargs) [rank10]: ^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank10]: local_plan = planner.create_local_plan() [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank30]: local_plan = planner.create_local_plan() [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank30]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank30]: raise CheckpointingException(_msg) [rank30]: megatron.core.dist_checkpointing.core.CheckpointingException5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank17]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank17]: Traceback (most recent call last): (RANK 21) [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank17]: local_data = map_fun() [rank17]: 
^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank17]: result = func(*args, **kwargs) [rank17]: ^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank17]: local_plan = planner.create_local_plan() [rank1]: result = func(*args, **kwargs) [rank1]: ^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank1]: local_plan = planner.create_local_plan() [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank1]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shreduce_scatter [rank5]: local_data = map_fun() [rank5]: ^^^^^^^^^ [rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank10]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank10]: raise CheckpointingException(_msg) [rank10]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank10]: Traceback (most recent call last): (RANK 5) [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank10]: local_data = map_fun() [rank25]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank25]: raise CheckpointingException(_msg) [rank25]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank25]: Traceback (most recent call last): (RANK 30) [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank25]: local_data = map_fun() [rank25]: ^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank17]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/jundistributed/checkpoint/logger.py", line 87, in wrapper [rank21]: result = func(*args, **kwargs) [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 
87, in wrapper [rank5]: result = func(*args, **kwargs) [rank5]: ^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank5]: local_plan = planner.create_local_plan() [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank5]: raise CheckpointingException(_msg) [rank10]: ^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank10]: result = func(*args, **kwargs) [rank10]: ^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank10]: local_plan = planner.create_local_plan() [rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py"distributed/checkpoint/logger.py", line 87, in wrapper [rank12]: result = func(*args, **kwargs) [rank12]: ^^^^^^^^^^^^^^^^^^^^^ [rank25]: result = func(*args, **kwargs) [rank25]: ^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank25]: local_plan = planner.create_local_plan() [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank25]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank25]: raise CheckpointingException(_msg) [rank21]: ^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank21]: local_plan = planner.create_local_plan() [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank21]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank21]: raise CheckpointingException(_msg) [rank21]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key 
embedding.position_embeddings.weight [rank5]: Traceback (most recent call last): (RANK 20) [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank5]: local_data = map_fun() [rank5]: ^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank5]: result = func(*args, **kwargs) [rank5]: ^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank5]: local_plan = planner.create_local_plan() [rank25]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank25]: Traceback (most recent call last): (RANK 31) [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank25]: local_data = map_fun() [rank25]: ^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank25]: result = func(*args, **kwargs) [rank25]: ^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank25]: local_plan = planner.create_local_plan() [rank21]: Traceback (most recent call last): (RANK 17) [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank21]: local_data = map_fun() [rank21]: ^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank21]: result = func(*args, **kwargs) [rank21]: ^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank21]: local_plan = planner.create_local_plan() [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank21]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank21]: raise CheckpointingException(_msg) [rank21]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 
4096)) tensor for key embedding.position_embeddings.weight [rank21]: Traceback (most recent call last): (RANK 18) [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank21]: local_data = map_fun() [rank21]: ^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank5]: raise CheckpointingException(_msg) [rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank5]: Traceback (most recent call last): (RANK 21) [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank5]: local_data = map_fun() [rank5]: ^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank12]: local_plan = planner.create_local_plan() [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank12]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank12]: raise CheckpointingException(_msg) [rank12]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank25]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank25]: raise CheckpointingException(_msg) [rank25]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight 87, in wrapper [rank31]: result = func(*args, **kwargs) [rank31]: ^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank31]: local_plan = planner.create_local_plan() [rank31]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: result = func(*args, **kwargs) [rank21]: ^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank21]: 
local_plan = planner.create_local_plan() [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank21]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank21]: raise CheckpointingException(_msg) [rank5]: result = func(*args, **kwargs) [rank5]: ^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank5]: local_plan = planner.create_local_plan() [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_sh/state_dict_loader.py", line 223, in local_step [rank6]: local_plan = planner.create_local_plan() [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank12]: Traceback (most recent call last): (RANK 17) [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank31]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank31]: raise CheckpointingException(_msg) [rank31]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank31]: Traceback (most recent call last): (RANK 9) [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank31]: local_data = map_fun() [rank31]: ^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank6]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank6]: raise CheckpointingException(_msg) [rank6]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank6]: Traceback (most recent call last): (RANK 17) [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank6]: local_data = map_fun() [rank6]: ^^^^^^^^^ [rank12]: local_data = 
map_fun() [rank12]: ^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank12]: result = func(*args, **kwargs) [rank12]: ^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank12]: local_plan = planner.create_local_plan() [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank12]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank31]: result = func(*args, **kwargs) [rank31]: ^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank31]: local_plan = planner.create_local_plan() [rank31]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank31]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank31]: raise CheckpointingException(_msg) [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank6]: result = func(*args, **kwargs) [rank6]: ^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank6]: local_plan = planner.create_local_plan() [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank6]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank6]: raise CheckpointingException(_msg) [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank12]: raise CheckpointingException(_msg) [rank12]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank12]: Traceback (most recent call last): (RANK 18) [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank12]: local_data = map_fun() [rank12]: ^^^^^^^^^ [rank12]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank12]: result = func(*args, **kwargs) [rank12]: ^^^^^^^^^^^^^^^^^^^^^ [rank31]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank31]: Traceback (most recent call last): (RANK 10) [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank31]: local_data = map_fun() [rank31]: ^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank31]: result = func(*args, **kwargs) [rank31]: ^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank31]: local_plan = planner.create_local_plan() [rank21]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torcrver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank6]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank6]: Traceback (most recent call last): (RANK 18) [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank6]: local_data = map_fun() [rank6]: ^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank6]: result = func(*args, **kwargs) [rank6]: ^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank6]: local_plan = planner.create_local_plan() [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank12]: local_plan = planner.create_local_plan() [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank12]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank12]: raise CheckpointingException(_msg) [rank12]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torc, line 605, in create_local_plan [rank10]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank31]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: result = func(*args, **kwargs) [rank18]: ^^^^^^^^^^^^^^^^^^^^^ [rank18]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank18]: local_plan = planner.create_local_plan() [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank18]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank18]: raise CheckpointingException(_msg) [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank6]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank6]: raise CheckpointingException(_msg) [rank6]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank6]: Traceback (most recent call last): (RANK 19) [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in /state_dict_loader.py", line 223, in local_step [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank10]: raise CheckpointingException(_msg) [rank10]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank10]: Traceback (most recent call last): (RANK 6) [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank10]: local_data = map_fun() [rank10]: ^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank10]: result = func(*args, **kwargs) [rank10]: ^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank31]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank31]: raise CheckpointingException(_msg) [rank31]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16h.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank24]: Traceback (most recent call last): (RANK 19) [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter 
[rank24]: local_data = map_fun() [rank18]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank18]: Traceback (most recent call last): (RANK 25) [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank18]: local_data = map_fun() [rank18]: ^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank18]: result = func(*args, **kwargs) [rank18]: ^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank18]: local_plan = planner.create_local_plan() [rank7]: local_plan = planner.create_local_plan() [rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank7]: raise CheckpointingException(_msg) [rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank7]: Traceback (most recent call last): (RANK 17) [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank24]: ^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: result = func(*args, **kwargs) [rank24]: ^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank24]: local_plan = planner.create_local_plan() [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank18]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes 
[rank18]: raise CheckpointingException(_msg) [rank18]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank18]: Traceback (most recent call last): (RANK 26) [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank18]: local_data = map_fun() [rank7]: local_data = map_fun() [rank7]: ^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank7]: result = func(*args, **kwargs) [rank7]: ^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank7]: local_plan = planner.create_local_plan() [rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank10]: local_plan = planner.create_local_plan() [rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank10]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank10]: raise CheckpointingException(_msg) [rank10]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank10]: Traceback (most recent call last): (RANK 7) [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank24]: raise CheckpointingException(_msg) [rank24]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank24]: Traceback (most recent call last): (RANK 20) [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank24]: local_data = map_fun() [rank24]: ^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: result = func(*args, **kwargs) [rank24]: ^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank18]: ^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank18]: result = func(*args, **kwargs) [rank18]: ^^^^^^^^^^^^^^^^^^^^^ [rank18]: 
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank18]: local_plan = planner.create_local_plan() [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank18]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank7]: raise CheckpointingException(_msg) [rank10]: local_data = map_fun() [rank10]: ^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank10]: result = func(*args, **kwargs) [rank10]: ^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank10]: local_plan = planner.create_local_plan() [rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank10]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank24]: local_plan = planner.create_local_plan() [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank24]: raise CheckpointingException(_msg) [rank24]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank24]: Traceback (most recent call last): (RANK 21) [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank18]: raise CheckpointingException(_msg) [rank18]: megatron.core.dist_checkpointing.core.CheckpointingExceptionrver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank7]: Traceback (most recent call last): (RANK 18) [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank7]: local_data = map_fun() [rank7]: ^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank7]: 
result = func(*args, **kwargs) [rank7]: ^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank7]: local_plan = planner.create_local_plan() [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank10]: raise CheckpointingException(_msg) [rank10]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank10]: Traceback (most recent call last): (RANK 8) [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank10]: local_data = map_fun() [rank10]: ^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", lineh.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank24]: local_data = map_fun() [rank16]: result = func(*args, **kwargs) [rank16]: ^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank16]: local_plan = planner.create_local_plan() [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank16]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank16]: raise CheckpointingException(_msg) [rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank7]: raise CheckpointingException(_msg) [rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank7]: Traceback (most recent call last): (RANK 19) [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in r.create_local_plan() [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank12]: Traceback (most recent call last): (RANK 19) [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank12]: local_data = map_fun() [rank12]: ^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank12]: result = func(*args, **kwargs) 
[rank12]: ^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank12]: local_plan = planner.create_local_plan() [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: ^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: result = func(*args, **kwargs) [rank24]: ^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank24]: local_plan = planner.create_local_plan() [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/jun: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank30]: Traceback (most recent call last): (RANK 27) [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank16]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank16]: Traceback (most recent call last): (RANK 25) [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank16]: local_data = map_fun() [rank16]: ^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank16]: result = func(*args, **kwargs) [rank16]: ^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank16]: local_plan = planner.create_local_plan() [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank2]: raise CheckpointingException(_msg) [rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank2]: Traceback (most recent call last): (RANK 25) [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank2]: local_data = map_fun() [rank2]: ^^^^^^^^^ [rank12]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank30]: local_data = map_fun() [rank30]: ^^^^^^^^^ [rank30]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank30]: result = func(*args, **kwargs) [rank30]: ^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank30]: local_plan = planner.create_local_plan() [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank30]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank16]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank16]: raise CheckpointingException(_msg) [rank16]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank16]: Traceback (most recent call last): (RANK 26) [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank16]: local_data = map_fun() [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank2]: result = func(*args, **kwargs) [rank2]: ^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank2]: local_plan = planner.create_local_plan() [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank2]: raise CheckpointingException(_msg) [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank12]: raise CheckpointingException(_msg) [rank12]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank12]: Traceback (most recent call last): (RANK 20) [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank12]: local_data = map_fun() [rank12]: ^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank12]: 
result = func(*args, **kwargs) [rank12]: ^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank30]: raise CheckpointingException(_msg) [rank30]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank30]: Traceback (most recent call last): (RANK 28) [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank30]: local_data = map_fun() [rank30]: ^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank30]: result = func(*args, **kwargs) [rank30]: ^^^^^^^^^^^^^^^^^^^^^ [rank16]: ^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank16]: result = func(*args, **kwargs) [rank16]: ^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank16]: local_plan = planner.create_local_plan() [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank16]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank2]: Traceback (most recent call last): (RANK 26) [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank2]: local_data = map_fun() [rank2]: ^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank2]: result = func(*args, **kwargs) [rank2]: ^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank2]: local_plan = planner.create_local_plan() [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank12]: local_plan = planner.create_local_plan() [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank12]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", 
line 576, in _validate_global_shapes [rank12]: raise CheckpointingException(_msg) [rank12]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank30]: local_plan = planner.create_local_plan() [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank30]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank30]: raise CheckpointingException(_msg) [rank30]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank16]: raise CheckpointingException(_msg) [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank2]: raise CheckpointingException(_msg) [rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank2]: Traceback (most recent call last): (RANK 27) [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank2]: local_data = map_fun() [rank12]: Traceback (most recent call last): (RANK 21) [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank12]: local_data = map_fun() [rank12]: ^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank12]: result = func(*args, **kwargs) [rank12]: ^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank12]: local_plan = planner.create_local_plan() [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/jun 87, in wrapper [rank10]: result = func(*args, **kwargs) [rank10]: ^^^^^^^^^^^^^^^^^^^^^ [rank30]: Traceback (most recent call last): (RANK 29) [rank16]: megatron.core.dist_checkpointing.core.CheckpointingExceptionda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank19]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank19]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank19]: raise CheckpointingException(_msg) [rank19]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank19]: Traceback (most recent call last): (RANK 22) [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank19]: local_data = map_fun() [rank19]: ^^^^^^^^^ [rank2]: ^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank4]: result = func(*args, **kwargs) [rank4]: ^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank4]: local_plan = planner.create_local_plan() [rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank10]: local_plan = planner.create_local_plan() [rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank10]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank10]: raise CheckpointingException(_msg) [rank10]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank30]: local_data = map_fun() [rank30]: ^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank30]: result = func(*args, **kwargs) [rank30]: ^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank30]: local_plan = planner.create_local_plan() [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank3da/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank19]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank19]: result = func(*args, **kwargs) [rank19]: ^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank19]: local_plan = planner.create_local_plan() [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank19]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank19]: raise CheckpointingException(_msg) [rank4]: raise CheckpointingException(_msg) [rank4]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank4]: Traceback (most recent call last): (RANK 28) [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank4]: local_data = map_fun() [rank4]: ^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank4]: result = func(*args, **kwargs) [rank4]: ^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank10]: Traceback (most recent call last): (RANK 9) [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank24]: raise CheckpointingException(_msg) [rank24]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank24]: Traceback (most recent call last): (RANK 22) [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank24]: local_data = map_fun() [rank24]: ^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: result = func(*args, **kwargs) [rank24]: ^^^^^^^^^^^^^^^^^^^^^ [rank19]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank19]: Traceback (most recent call last): (RANK 23) [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank19]: local_data = map_fun() [rank19]: ^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", 
line 87, in wrapper [rank19]: result = func(*args, **kwargs) [rank19]: ^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank19]: local_plan = planner.create_local_plan() [rank4]: local_plan = planner.create_local_plan() [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank10]: local_data = map_fun() [rank10]: ^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank10]: result = func(*args, **kwargs) [rank10]: ^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank10]: local_plan = planner.create_local_plan() [rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank24]: local_plan = planner.create_local_plan() [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank24]: raise CheckpointingException(_msg) [rank24]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank19]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank19]: raise CheckpointingException(_msg) [rank19]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank19]: Traceback (most recent call last): (RANK 24) [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank19]: local_data = map_fun() [rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank4]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank4]: raise CheckpointingException(_msg) [rank4]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank4]: Traceback (most recent call last): (RANK 29) [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank4]: local_data = map_fun() [rank10]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank10]: raise CheckpointingException(_msg) [rank10]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank10]: Traceback (most recent call last): (RANK 10) [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank10]: local_data = map_fun() [rank10]: ^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: Traceback (most recent call last): (RANK 23) [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank24]: local_data = map_fun() [rank24]: ^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: result = func(*args, **kwargs) [rank24]: ^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank24]: local_plan = planner.create_local_plan() [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank19]: ^^^^^^^^^ [rank4]: ^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank4]: result = func(*args, **kwargs) [rank4]: ^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank4]: local_plan = planner.create_local_plan() [rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank10]: result = 
func(*args, **kwargs) [rank10]: ^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank10]: local_plan = planner.create_local_plan() [rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank10]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank10]: raise CheckpointingException(_msg) [rank24]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank24]: raise CheckpointingException(_msg) [rank24]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank24]: Traceback (most recent call last): (RANK 24) [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank24]: local_data = map_fun() [rank24]: ^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnseh.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank22]: Traceback (most recent call last): (RANK 19) [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank22]: local_data = map_fun() [rank22]: ^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank22]: result = func(*args, **kwargs) [rank22]: ^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank22]: local_plan = planner.create_local_plan() [rank4]: raise CheckpointingException(_msg) [rank4]: megatron.core.distapes [rank0]: raise CheckpointingException(_msg) [rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank0]: Traceback (most recent call last): (RANK 22) [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank0]: local_data = map_fun() [rank0]: ^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank0]: result = func(*args, **kwargs) [rank0]: ^^^^^^^^^^^^^^^^^^^^^ [rank10]: 
megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16da/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank12]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank12]: raise CheckpointingException(_msg) [rank12]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank12]: Traceback (most recent call last): (RANK 22) [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank28]: result = func(*args, **kwargs) [rank28]: ^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank28]: local_plan = planner.create_local_plan() [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank28]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank28]: raise CheckpointingException(_msg) [rank22]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank22]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank22]: raise CheckpointingException(_msg) [rank22]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank22]: Traceback (most recent call last): (RANK 20) [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank22]: local_data = map_fun() [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank0]: local_plan = planner.create_local_plan() [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank0]: raise CheckpointingException(_msg) [rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global 
shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank0]: Traceback (most recent call last): (RANK 23) [rank12]: local_data = map_fun() [rank12]: ^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank28]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank28]: Traceback (most recent call last): (RANK 25) [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank28]: local_data = map_fun() [rank28]: ^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank28]: result = func(*args, **kwargs) [rank28]: ^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank28]: local_plan = planner.create_local_plan() [rank22]: ^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank22]: result = func(*args, **kwargs) [rank22]: ^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank22]: local_plan = planner.create_local_plan() [rank22]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank22]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank12]: result = func(*args, **kwargs) [rank12]: ^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank12]: local_plan = planner.create_local_plan() [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank12]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank12]: raise CheckpointingException(_msg) [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank28]: self._validate_global_shapes(self.metadata, 
self.shapes_validation_sharded_tensors) [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank28]: raise CheckpointingException(_msg) [rank28]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank28]: Traceback (most recent call last): (RANK 26) [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank28]: local_data = map_fun() [rank22]: raise CheckpointingException(_msg) [rank22]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank22]: Traceback (most recent call last): (RANK 21) [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank22]: local_data = map_fun() [rank22]: ^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank22]: result = func(*args, **kwargs) [rank22]: ^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank0]: local_data = map_fun() [rank0]: ^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank0]: result = func(*args, **kwargs) [rank0]: ^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank0]: local_plan = planner.create_local_plan() [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank12]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank12]: Traceback (most recent call last): (RANK 23) [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank12]: local_data = map_fun() [rank12]: ^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank12]: result = func(*args, **kwargs) [rank12]: ^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank12]: local_plan = planner.create_local_plan() [rank28]: ^^^^^^^^^ [rank28]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank22]: local_plan = planner.create_local_plan() [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank0]: raise CheckpointingException(_msg) [rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank0]: Traceback (most recent call last): (RANK 24) [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank0]: local_data = map_fun() [rank0]: ^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank0]: result = func(*args, **kwargs) [rank0]: ^^^^^^^^^^^^^^^^^^^^^ [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank12]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank12]: raise CheckpointingException(_msg) [rank12]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank12]: Traceback (most recent call last): (RANK 24) [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank12]: local_data = map_fun() [rank28]: result = func(*args, **kwargs) [rank28]: ^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank28]: local_plan = planner.create_local_plan() [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank28]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank28]: raise CheckpointingException(_msg) [rank22]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/jun: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank18]: Traceback (most recent call last): (RANK 27) [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank18]: local_data = map_fun() [rank18]: ^^^^^^^^^ [rank18]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank18]: result = func(*args, **kwargs) [rank18]: ^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank0]: local_plan = planneapes [rank1]: raise CheckpointingException(_msg) [rank1]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank1]: Traceback (most recent call last): (RANK 22) [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank1]: local_data = map_fun() [rank1]: ^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank1]: result = func(*args, **kwargs) [rank12]: ^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnse384, 4096)) tensor for key embedding.position_embeddings.weight [rank10]: Traceback (most recent call last): (RANK 11) [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank10]: local_data = map_fun() [rank10]: ^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank10]: result = func(*args, **kwargs) [rank10]: ^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank10]: local_plan = planner.create_local_plan() [rank28]: megatron.core.dist_checkpointing.core.CheckpointingException0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank30]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank30]: raise CheckpointingException(_msg) [rank30]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank30]: Traceback (most recent call last): (RANK 30) [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank30]: local_data = map_fun() [rank18]: local_plan = planner.create_local_plan() [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank18]: 
self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank18]: raise CheckpointingException(_msg) [rank18]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank18]: Traceback (most recent call last): (RANK 28) [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank1]: ^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank1]: local_plan = planner.create_local_plan() [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank1]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank1]: raise CheckpointingException(_msg) [rank1]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank30]: ^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank30]: result = func(*args, **kwargs) [rank30]: ^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank30]: local_plan = planner.create_local_plan() [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank30]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank18]: local_data = map_fun() [rank18]: ^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank18]: result = func(*args, **kwargs) [rank18]: ^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank18]: local_plan = planner.create_local_plan() [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan 
[rank18]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank1]: Traceback (most recent call last): (RANK 23) [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank10]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank10]: raise CheckpointingException(_msg) [rank10]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank10]: Traceback (most recent call last): (RANK 12) [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank10]: local_data = map_fun() [rank10]: ^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank30]: raise CheckpointingException(_msg) [rank30]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank30]: Traceback (most recent call last): (RANK 31) [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank30]: local_data = map_fun() [rank30]: ^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank30]: result = func(*args, **kwargs) [rank30]: ^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank18]: raise CheckpointingException(_msg) [rank18]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank18]: Traceback (most recent call last): (RANK 29) [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank18]: local_data = map_fun() [rank18]: ^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank18]: result = func(*args, **kwargs) [rank18]: ^^^^^^^^^^^^^^^^^^^^^ [rank1]: local_data = map_fun() [rank1]: ^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank1]: result = func(*args, **kwargs) [rank1]: ^^^^^^^^^^^^^^^^^^^^^ [rank1]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank1]: local_plan = planner.create_local_plan() [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank1]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank10]: result = func(*args, **kwargs) [rank10]: ^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank10]: local_plan = planner.create_local_plan() [rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank10]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank10]: raise CheckpointingException(_msg) [rank30]: local_plan = planner.create_local_plan() [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank1]: raise CheckpointingException(_msg) [rank1]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank1]: Traceback (most recent call last): (RANK 24) [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank1]: local_data = map_fun() [rank1]: ^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank1]: result = func(*args, **kwargs) [rank1]: ^^^^^^^^^^^^^^^^^^^^^ [rank10]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank10]: Traceback (most recent call last): (RANK 13) [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank10]: local_data = map_fun() [rank10]: ^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank10]: result = func(*args, **kwargs) [rank10]: ^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank10]: local_plan = planner.create_local_plan() [rank30]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank30]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank30]: raise CheckpointingException(_msg) [rank30]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight : Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank29]: Traceback (most recent call last): (RANK 27) [rank18]: local_plan = planner.create_local_plan() [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank16]: Traceback (most recent call last): (RANK 27) [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank16]: local_data = map_fun() [rank16]: ^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank16]: result = func(*args, **kwargs) [rank16]: ^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank1]: local_plan = planneapes [rank5]: raise CheckpointingException(_msg) [rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank5]: Traceback (most recent call last): (RANK 22) [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank5]: local_data = map_fun() [rank5]: ^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank5]: result = func(*args, **kwargs) [rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/disrver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank12]: result = func(*args, **kwargs) [rank12]: ^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank12]: local_plan = planner.create_local_plan() [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank12]: self._validate_global_shapes(self.metadata, 
self.shapes_validation_sharded_tensors) [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank29]: local_data = map_fun() [rank29]: ^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank29]: result = func(*args, **kwargs) [rank29]: ^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank29]: local_plan = planner.create_local_plan() [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank16]: local_plan = planner.create_local_plan() [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank16]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank16]: raise CheckpointingException(_msg) [rank16]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank16]: Traceback (most recent call last): (RANK 28) [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank5]: ^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank5]: local_plan = planner.create_local_plan() [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank5]: raise CheckpointingException(_msg) [rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank12]: raise CheckpointingException(_msg) [rank29]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: raise CheckpointingException(_msg) [rank29]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor 
for key embedding.position_embeddings.weight [rank29]: Traceback (most recent call last): (RANK 28) [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank29]: local_data = map_fun() [rank29]: ^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank16]: local_data = map_fun() [rank16]: ^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank16]: result = func(*args, **kwargs) [rank16]: ^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank16]: local_plan = planner.create_local_plan() [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank16]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank5]: Traceback (most recent call last): (RANK 23) [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank12]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank12]: Traceback (most recent call last): (RANK 25) [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank12]: local_data = map_fun() [rank12]: ^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank12]: result = func(*args, **kwargs) [rank12]: ^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank12]: local_plan = planner.create_local_plan() [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank16]: raise CheckpointingException(_msg) [rank16]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank16]: Traceback (most recent call last): (RANK 29) [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank16]: local_data = map_fun() [rank16]: ^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank16]: result = func(*args, **kwargs) [rank16]: ^^^^^^^^^^^^^^^^^^^^^ [rank5]: local_data = map_fun() [rank5]: ^^^^^^^^^ [rank5]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank5]: result = func(*args, **kwargs) [rank5]: ^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank5]: local_plan = planner.create_local_plan() [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank12]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank12]: raise CheckpointingException(_msg) [rank12]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank12]: Traceback (most recent call last): (RANK 26) [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank12]: local_data = map_fun() [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank5]: raise CheckpointingException(_msg) [rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank5]: Traceback (most recent call last): (RANK 24) [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank5]: local_data = map_fun() [rank5]: ^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank5]: result = func(*args, **kwargs) [rank5]: ^^^^^^^^^^^^^^^^^^^^^ [rank12]: ^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank12]: result = func(*args, **kwargs) [rank12]: ^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank12]: local_plan = planner.create_local_plan() [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank12]: 
self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: result = func(*args, **kwargs) [rank29]: ^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank29]: local_plan = planner.create_local_plan() [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank29]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: raise CheckpointingException(_msg) [rank16]: local_plan = planner.create_local_plan() [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1h.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank21]: Traceback (most recent call last): (RANK 19) [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank21]: local_data = map_fun() [rank21]: ^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank21]: result = func(*args, **kwargs) [rank21]: ^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank5]: local_plan = plannereduce_scatter [rank6]: local_data = map_fun() [rank6]: ^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank6]: result = func(*args, **kwargs) [rank6]: ^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank6]: local_plan = planner.create_local_plan() [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank12]: raise CheckpointingException(_msg) [rank12]: megatron.core.dist_checkpointing.core.CheckpointingExceptiont_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank10]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank10]: raise CheckpointingException(_msg) [rank10]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key 
embedding.position_embeddings.weight [rank10]: Traceback (most recent call last): (RANK 14) [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank10]: local_data = map_fun() [rank10]: ^^^^^^^^^ [rank29]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank29]: Traceback (most recent call last): (RANK 29) [rank21]: local_plan = planner.create_local_plan() [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank21]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank21]: raise CheckpointingException(_msg) [rank21]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank21]: Traceback (most recent call last): (RANK 20) [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank6]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank6]: raise CheckpointingException(_msg) [rank6]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank6]: Traceback (most recent call last): (RANK 20) [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank6]: local_data = map_fun() [rank6]: ^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank10]: result = func(*args, **kwargs) [rank10]: ^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank10]: local_plan = planner.create_local_plan() [rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank10]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank10]: raise CheckpointingException(_msg) [rank29]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank29]: local_data = map_fun() [rank29]: ^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank29]: result = func(*args, **kwargs) [rank29]: ^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank29]: local_plan = planner.create_local_plan() [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2384, 4096)) tensor for key embedding.position_embeddings.weight [rank31]: Traceback (most recent call last): (RANK 11) [rank21]: local_data = map_fun() [rank21]: ^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank21]: result = func(*args, **kwargs) [rank21]: ^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank21]: local_plan = planner.create_local_plan() [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank21]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank6]: result = func(*args, **kwargs) [rank6]: ^^^^^^^^^^^^^^^^^^^^^ [rank10]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank10]: Traceback (most recent call last): (RANK 15) [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank10]: local_data = map_fun() [rank10]: ^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank10]: result = func(*args, **kwargs) [rank10]: ^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank10]: local_plan = planner.create_local_plan() [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank31]: local_data = map_fun() [rank31]: ^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank31]: result = func(*args, **kwargs) [rank31]: ^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank31]: local_plan = planner.create_local_plan() [rank31]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank31]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank21]: raise CheckpointingException(_msg) [rank21]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank21]: Traceback (most recent call last): (RANK 21) [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank21]: local_data = map_fun() [rank21]: ^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank21]: result = func(*args, **kwargs) [rank21]: ^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank6]: local_plan = planner.create_local_plan() [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank6]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank6]: raise CheckpointingException(_msg) [rank6]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank6]: Traceback (most recent call last): (RANK 21) [rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank10]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank10]: raise CheckpointingException(_msg) [rank10]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank10]: Traceback (most recent call last): (RANK 16) [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank10]: local_data = map_fun() [rank31]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank31]: raise CheckpointingException(_msg) [rank31]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank31]: 
Traceback (most recent call last): (RANK 12) [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank31]: local_data = map_fun() [rank31]: ^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank6]: local_data = map_fun() [rank6]: ^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank6]: result = func(*args, **kwargs) [rank6]: ^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank6]: local_plan = planner.create_local_plan() [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank10]: ^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank12]: Traceback (most recent call last): (RANK 27) [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank12]: local_data = map_fun() [rank12]: ^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank12]: result = func(*args, **kwargs) [rank12]: ^^^^^^^^^^^^^^^^^^^^^ [rank31]: result = func(*args, **kwargs) [rank31]: ^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank31]: local_plan = planner.create_local_plan() [rank31]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank31]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank31]: raise CheckpointingException(_msg) [rank21]: local_plan = planner.create_local_plan() [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junrver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank23]: result = func(*args, **kwargs) [rank23]: ^^^^^^^^^^^^^^^^^^^^^ [rank23]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank23]: local_plan = planner.create_local_plan() [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank23]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank6]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shreduce_scatter [rank7]: local_data = map_fun() [rank7]: ^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank7]: result = func(*args, **kwargs) [rank7]: ^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank7]: local_plan = planner.create_local_plan() [rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank31]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank31]: Traceback (most recent call last): (RANK 13) [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank23]: raise CheckpointingException(_msg) [rank23]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank23]: Traceback (most recent call last): (RANK 25) [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank23]: local_data = map_fun() [rank23]: ^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank23]: result = func(*args, **kwargs) [rank23]: ^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank7]: raise CheckpointingException(_msg) [rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank7]: Traceback (most recent call last): (RANK 20) [rank7]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank7]: local_data = map_fun() [rank7]: ^^^^^^^^^ [rank12]: local_plan = planner.create_local_plan() [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank12]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank12]: raise CheckpointingException(_msg) [rank12]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank12]: Traceback (most recent call last): (RANK 28) [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank31]: local_data = map_fun() [rank31]: ^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank31]: result = func(*args, **kwargs) [rank31]: ^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank31]: local_plan = planner.create_local_plan() [rank31]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank23]: local_plan = planner.create_local_plan() [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank23]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank23]: raise CheckpointingException(_msg) [rank23]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank7]: result = func(*args, **kwargs) [rank7]: ^^^^^^^^^^^^^^^^^^^^^ [rank12]: local_data = map_fun() [rank12]: ^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank12]: result = func(*args, **kwargs) [rank12]: ^^^^^^^^^^^^^^^^^^^^^ [rank12]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank12]: local_plan = planner.create_local_plan() [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank12]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dis9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank29]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: raise CheckpointingException(_msg) [rank29]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank29]: Traceback (most recent call last): (RANK 30) [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank23]: Traceback (most recent call last): (RANK 26) [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank23]: local_data = map_fun() [rank23]: ^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank23]: result = func(*args, **kwargs) [rank23]: ^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank23]: local_plan = planner.create_local_plan() [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank7]: local_plan = planner.create_local_plan() [rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank7]: raise CheckpointingException(_msg) [rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank7]: Traceback (most recent call last): (RANK 21) [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in 
_validate_global_shapes [rank12]: raise CheckpointingException(_msg) [rank12]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank12]: Traceback (most recent call last): (RANK 29) [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank12]: local_data = map_fun() [rank12]: ^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank12]: result = func(*args, **kwargs) [rank12]: ^^^^^^^^^^^^^^^^^^^^^ [rank29]: local_data = map_fun() [rank29]: ^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank29]: result = func(*args, **kwargs) [rank29]: ^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank29]: local_plan = planner.create_local_plan() [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank29]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank23]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank7]: local_data = map_fun() [rank7]: ^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank7]: result = func(*args, **kwargs) [rank7]: ^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank7]: local_plan = planner.create_local_plan() [rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank12]: local_plan = planner.create_local_plan() [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1distributed/checkpoint/logger.py", line 87, in wrapper [rank10]: result = func(*args, **kwargs) [rank10]: ^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank10]: local_plan = planner.create_local_plan() [rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank10]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank29]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: raise CheckpointingException(_msg) [rank29]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank29]: Traceback (most recent call last): (RANK 31) [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank29]: local_data = map_fun() [rank29]: ^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank29]: result = func(*args, **kwargs) [rank29]: ^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank23]: raise CheckpointingException(_msg) [rank23]: megatron.core.dist_checkpointing.core.CheckpointingExceptionda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank17]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank17]: raise CheckpointingException(_msg) [rank17]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank17]: Traceback (most recent call last): (RANK 22) [rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_sh^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank2]: result = func(*args, **kwargs) [rank2]: ^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank2]: local_plan = planner.create_local_plan() [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank29]: local_plan = planner.create_local_plan() [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank17]: local_data = map_fun() [rank17]: ^^^^^^^^^ [rank17]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank17]: result = func(*args, **kwargs) [rank17]: ^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank17]: local_plan = planner.create_local_plan() [rank17]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank2]: raise CheckpointingException(_msg) [rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank2]: Traceback (most recent call last): (RANK 28) [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank2]: local_data = map_fun() [rank2]: ^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank10]: raise CheckpointingException(_msg) [rank10]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank10]: Traceback (most recent call last): (RANK 17) [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank10]: local_data = map_fun() [rank10]: ^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank10]: result = func(*args, **kwargs) [rank10]: ^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank29]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: raise CheckpointingException(_msg) [rank29]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight 384, 4096)) tensor for key embedding.position_embeddings.weight [rank27]: Traceback (most recent call last): (RANK 11) [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank27]: 
[rank0]: Traceback (most recent call last):
[rank0]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter
[rank0]:     local_data = map_fun()
[rank0]:                  ^^^^^^^^^
[rank0]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper
[rank0]:     result = func(*args, **kwargs)
[rank0]:              ^^^^^^^^^^^^^^^^^^^^^
[rank0]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step
[rank0]:     local_plan = planner.create_local_plan()
[rank0]:                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^
[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan
[rank0]:     self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors)
[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes
[rank0]:     raise CheckpointingException(_msg)
[rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight
[... the identical traceback and CheckpointingException are emitted, interleaved, by every rank that takes part in the checkpoint load (observed here on ranks 0, 1, 2, 4, 5, 6, 10, 12, 17, 18, 19, 21, 22, 27, 28) and repeated once per aggregated sub-rank marker "(RANK 12)" through "(RANK 31)"; the duplicated copies are omitted ...]
line 605, in create_local_plan [rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank21]: Traceback (most recent call last): (RANK 25) [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank21]: local_data = map_fun() [rank21]: ^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank21]: result = func(*args, **kwargs) [rank21]: ^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank21]: local_plan = planner.create_local_plan() [rank6]: local_data = map_fun() [rank6]: ^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank6]: result = func(*args, **kwargs) [rank6]: ^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank6]: local_plan = planner.create_local_plan() [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank6]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank10]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank10]: raise CheckpointingException(_msg) [rank10]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank10]: Traceback (most recent call last): (RANK 29) [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank10]: local_data = map_fun() [rank10]: ^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank27]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank27]: raise CheckpointingException(_msg) [rank27]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torcrver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: result = func(*args, **kwargs) [rank24]: ^^^^^^^^^^^^^^^^^^^^^ [rank24]: 
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank24]: local_plan = planner.create_local_plan() [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank21]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank21]: raise CheckpointingException(_msg) [rank21]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank21]: Traceback (most recent call last): (RANK 26) [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank21]: local_data = map_fun() [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank6]: raise CheckpointingException(_msg) [rank6]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank6]: Traceback (most recent call last): (RANK 23) [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank6]: local_data = map_fun() [rank6]: ^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank6]: result = func(*args, **kwargs) [rank6]: ^^^^^^^^^^^^^^^^^^^^^ [rank10]: result = func(*args, **kwargs) [rank10]: ^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank10]: local_plan = planner.create_local_plan() [rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank10]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank10]: raise CheckpointingException(_msg) [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank24]: raise CheckpointingException(_msg) [rank24]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and 
expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank24]: Traceback (most recent call last): (RANK 25) [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank24]: local_data = map_fun() [rank21]: ^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank21]: result = func(*args, **kwargs) [rank21]: ^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank21]: local_plan = planner.create_local_plan() [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank21]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank10]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank24]: ^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: result = func(*args, **kwargs) [rank24]: ^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank24]: local_plan = planner.create_local_plan() [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank21]: raise CheckpointingException(_msg) [rank6]: local_plan = planner.create_local_plan() [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank6]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank6]: raise CheckpointingException(_msg) [rank6]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank6]: Traceback (most recent call last): (RANK 24) [rank6]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank10]: Traceback (most recent call last): (RANK 30) [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank10]: local_data = map_fun() [rank10]: ^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank10]: result = func(*args, **kwargs) [rank10]: ^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank10]: local_plan = planner.create_local_plan() [rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: raise CheckpointingException(_msg) [rank24]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank24]: Traceback (most recent call last): (RANK 26) [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank24]: local_data = map_fun() [rank24]: ^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: result = func(*args, **kwargs) [rank24]: ^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank21]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank23]: Traceback (most recent call last): (RANK 27) [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank23]: local_data = map_fun() [rank23]: ^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank23]: result = func(*args, **kwargs) [rank23]: ^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank23]: local_plan = planner.create_local_plan() [rank6]: local_data = map_fun() [rank6]: ^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank6]: result = func(*args, **kwargs) [rank6]: ^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank6]: local_plan = 
planneapes [rank7]: raise CheckpointingException(_msg) [rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank7]: Traceback (most recent call last): (RANK 22) [rank10]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank10]: raise CheckpointingException(_msg) [rank10]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank10]: Traceback (most recent call last): (RANK 31) [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank10]: local_data = map_fun() [rank10]: ^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: local_plan = planner.create_local_plan() [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank23]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank23]: raise CheckpointingException(_msg) [rank23]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank23]: Traceback (most recent call last): (RANK 28) [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank23]: local_data = map_fun() [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank7]: local_data = map_fun() [rank7]: ^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank7]: result = func(*args, **kwargs) [rank7]: ^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank7]: local_plan = planner.create_local_plan() [rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank10]: result = func(*args, **kwargs) [rank10]: ^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank10]: local_plan = planner.create_local_plan() [rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank10]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank10]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank10]: raise CheckpointingException(_msg) [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank24]: raise CheckpointingException(_msg) [rank24]: megatron.core.dist_checkpointing.core.CheckpointingExceptiont_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank31]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank31]: raise CheckpointingException(_msg) [rank23]: ^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank23]: result = func(*args, **kwargs) [rank23]: ^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank23]: local_plan = planner.create_local_plan() [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank23]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank7]: raise CheckpointingException(_msg) [rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank7]: Traceback (most recent call last): (RANK 23) [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank7]: local_data = map_fun() [rank7]: ^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank10]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank31]: megatron.core.dist_checkpointing.core.CheckpointingException: Global 
shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank31]: Traceback (most recent call last): (RANK 14) [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank31]: local_data = map_fun() [rank31]: ^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank31]: result = func(*args, **kwargs) [rank31]: ^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank31]: local_plan = planner.create_local_plan() [rank23]: raise CheckpointingException(_msg) [rank23]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank23]: Traceback (most recent call last): (RANK 29) [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank23]: local_data = map_fun() [rank23]: ^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank23]: result = func(*args, **kwargs) [rank23]: ^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank7]: result = func(*args, **kwargs) [rank7]: ^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank8]: Traceback (most recent call last): [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank8]: pretrain( [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain [rank8]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer [rank8]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( [rank8]: ^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint [rank8]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( [rank31]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank31]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank31]: raise CheckpointingException(_msg) [rank31]: megatron.core.dist_checkpointing.core.CheckpointingException: Global 
shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank31]: Traceback (most recent call last): (RANK 15) [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank31]: local_data = map_fun() [rank23]: local_plan = planner.create_local_plan() [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank19]: Traceback (most recent call last): (RANK 27) [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank19]: local_data = map_fun() [rank19]: ^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank19]: result = func(*args, **kwargs) [rank19]: ^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank7]: local_plan = planner.create_local_plan() [rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank7]: raise CheckpointingException(_msg) [rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank7]: Traceback (most recent call last): (RANK 24) [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank8]: ^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint [rank8]: return _load_global_dist_base_checkpoint( [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint [rank8]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load [rank8]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) [rank31]: ^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank31]: result = func(*args, **kwargs) [rank31]: ^^^^^^^^^^^^^^^^^^^^^ [rank31]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank31]: local_plan = planner.create_local_plan() [rank31]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank31]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank19]: local_plan = planner.create_local_plan() [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank19]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank19]: raise CheckpointingException(_msg) [rank19]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank19]: Traceback (most recent call last): (RANK 28) [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank7]: local_data = map_fun() [rank7]: ^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank7]: result = func(*args, **kwargs) [rank7]: ^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank7]: local_plan = planne_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank2]: Traceback (most recent call last): (RANK 30) [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank2]: local_data = map_fun() [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load [rank8]: checkpoint.load_state_dict( [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper [rank8]: return arg(*args, **kwargs) [rank8]: ^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict [rank8]: return _load_state_dict( [rank8]: ^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict [rank31]: raise CheckpointingException(_msg) [rank19]: 
local_data = map_fun() [rank19]: ^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank19]: result = func(*args, **kwargs) [rank19]: ^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank19]: local_plan = planner.create_local_plan() [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank19]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank2]: ^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank2]: result = func(*args, **kwargs) [rank2]: ^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank2]: local_plan = planner.create_local_plan() [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank8]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter [rank8]: raise result [rank8]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]) [rank8]: Traceback (most recent call last): (RANK 0) [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank8]: local_data = map_fun() [rank8]: ^^^^^^^^^ [rank31]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank31]: Traceback (most recent call last): (RANK 16) [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank31]: local_data = map_fun() [rank31]: ^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/h.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank27]: Traceback (most recent call last): (RANK 19) [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank27]: local_data = 
map_fun() [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank19]: raise CheckpointingException(_msg) [rank19]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank19]: Traceback (most recent call last): (RANK 29) [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank19]: local_data = map_fun() [rank19]: ^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank19]: result = func(*args, **kwargs) [rank19]: ^^^^^^^^^^^^^^^^^^^^^ [rank2]: raise CheckpointingException(_msg) [rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank2]: Traceback (most recent call last): (RANK 31) [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank2]: local_data = map_fun() [rank2]: ^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank2]: result = func(*args, **kwargs) [rank2]: ^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank8]: result = func(*args, **kwargs) [rank27]: ^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank27]: result = func(*args, **kwargs) [rank27]: ^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank27]: local_plan = planner.create_local_plan() [rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank27]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank2]: local_plan = planner.create_local_plan() [rank8]: ^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank8]: local_plan = 
planner.create_local_plan() [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank8]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank8]: raise CheckpointingException(_msg) [rank8]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank27]: raise CheckpointingException(_msg) [rank27]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank27]: Traceback (most recent call last): (RANK 20) [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank27]: local_data = map_fun() [rank27]: ^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank27]: result = func(*args, **kwargs) [rank27]: ^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank19]: local_plan = planner.create_local_plan() [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank21]: Traceback (most recent call last): (RANK 27) [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank21]: local_data = map_fun() [rank21]: ^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank21]: result = func(*args, **kwargs) [rank21]: ^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank2]: raise CheckpointingException(_msg) [rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight ^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank0]: result = func(*args, **kwargs) 
[rank0]: ^^^^^^^^^^^^^^^^^^^^^ [rank8]: Traceback (most recent call last): (RANK 1) [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank8]: local_data = map_fun() [rank8]: ^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank8]: result = func(*args, **kwargs) [rank8]: ^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank8]: local_plan = planner.create_local_plan() [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank27]: local_plan = planner.create_local_plan() [rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank27]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank27]: raise CheckpointingException(_msg) [rank27]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank27]: Traceback (most recent call last): (RANK 21) [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank21]: local_plan = planner.create_local_plan() [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank21]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank21]: raise CheckpointingException(_msg) [rank21]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank21]: Traceback (most recent call last): (RANK 28) [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank0]: local_plan = planner.create_local_plan() [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank0]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank0]: raise CheckpointingException(_msg) [rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank0]: Traceback (most recent call last): (RANK 28) [rank8]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank8]: raise CheckpointingException(_msg) [rank8]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank8]: Traceback (most recent call last): (RANK 2) [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank8]: local_data = map_fun() [rank8]: ^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank27]: local_data = map_fun() [rank21]: local_data = map_fun() [rank21]: ^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank21]: result = func(*args, **kwargs) [rank21]: ^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank21]: local_plan = planner.create_local_plan() [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank21]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank0]: local_data = map_fun() [rank0]: ^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank0]: result = func(*args, **kwargs) [rank0]: ^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank0]: local_plan = planner.create_local_plan() [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank8]: result = func(*args, **kwargs) [rank8]: ^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank8]: local_plan = planner.create_local_plan() [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank8]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank8]: raise CheckpointingException(_msg) [rank27]: ^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank27]: result = func(*args, **kwargs) [rank27]: ^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank27]: local_plan = planner.create_local_plan() [rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/jun: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank24]: Traceback (most recent call last): (RANK 27) [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank21]: raise CheckpointingException(_msg) [rank21]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank21]: Traceback (most recent call last): (RANK 29) [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank21]: local_data = map_fun() [rank21]: ^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank21]: result = func(*args, **kwargs) [rank21]: ^^^^^^^^^^^^^^^^^^^^^ [rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank0]: raise CheckpointingException(_msg) [rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank0]: Traceback (most recent call last): (RANK 29) [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank0]: local_data = map_fun() [rank0]: ^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank8]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank24]: local_data = map_fun() [rank24]: ^^^^^^^^^ 
[rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: result = func(*args, **kwargs) [rank24]: ^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank24]: local_plan = planner.create_local_plan() [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank0]: result = func(*args, **kwargs) [rank0]: ^^^^^^^^^^^^^^^^^^^^^ [rank8]: Traceback (most recent call last): (RANK 3) [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank8]: local_data = map_fun() [rank8]: ^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank8]: result = func(*args, **kwargs) [rank8]: ^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank8]: local_plan = planner.create_local_plan() [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank24]: raise CheckpointingException(_msg) [rank24]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank24]: Traceback (most recent call last): (RANK 28) [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank24]: local_data = map_fun() [rank24]: ^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: result = func(*args, **kwargs) [rank24]: ^^^^^^^^^^^^^^^^^^^^^ [rank21]: local_plan = planner.create_local_plan() [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank23]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank23]: raise CheckpointingException(_msg) [rank23]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded 
(torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank23]: Traceback (most recent call last): (RANK 30) [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank0]: local_plan = planner.create_local_plan() [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank0]: raise CheckpointingException(_msg) [rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank0]: Traceback (most recent call last): (RANK 30) [rank8]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank8]: raise CheckpointingException(_msg) [rank8]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank8]: Traceback (most recent call last): (RANK 4) [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank8]: local_data = map_fun() [rank8]: ^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank24]: local_plan = planner.create_local_plan() [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank24]: raise CheckpointingException(_msg) [rank24]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank23]: local_data = map_fun() [rank23]: ^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank23]: result = func(*args, **kwargs) [rank23]: ^^^^^^^^^^^^^^^^^^^^^ [rank23]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank23]: local_plan = planner.create_local_plan() [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank23]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank0]: local_data = map_fun() [rank0]: ^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank0]: result = func(*args, **kwargs) [rank0]: ^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank0]: local_plan = planner.create_local_plan() [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank8]: result = func(*args, **kwargs) [rank8]: ^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank8]: local_plan = planner.create_local_plan() [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank8]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank8]: raise CheckpointingException(_msg) [rank24]: Traceback (most recent call last): (RANK 29) [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank23]: raise CheckpointingException(_msg) [rank23]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank23]: Traceback (most recent call last): (RANK 31) [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank23]: local_data = map_fun() [rank23]: ^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank23]: result = func(*args, **kwargs) [rank23]: ^^^^^^^^^^^^^^^^^^^^^ [rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank0]: raise CheckpointingException(_msg) [rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global 
shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank0]: Traceback (most recent call last): (RANK 31) [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank0]: local_data = map_fun() [rank0]: ^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank8]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank8]: Traceback (most recent call last): (RANK 5) [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank8]: local_data = map_fun() [rank8]: ^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank8]: result = func(*args, **kwargs) [rank8]: ^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank8]: local_plan = planner.create_local_plan() [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank24]: local_data = map_fun() [rank24]: ^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: result = func(*args, **kwargs) [rank24]: ^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank24]: local_plan = planner.create_local_plan() [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2distributed/checkpoint/logger.py", line 87, in wrapper [rank31]: result = func(*args, **kwargs) [rank31]: ^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank23]: local_plan = planner.create_local_plan() [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank23]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank23]: raise CheckpointingException(_msg) [rank23]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank0]: result = func(*args, **kwargs) [rank0]: ^^^^^^^^^^^^^^^^^^^^^ [rank0]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank0]: local_plan = planner.create_local_plan() [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank0]: raise CheckpointingException(_msg) [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank31]: local_plan = planner.create_local_plan() [rank31]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank31]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank31]: raise CheckpointingException(_msg) [rank31]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight rver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank22]: result = func(*args, **kwargs) [rank22]: ^^^^^^^^^^^^^^^^^^^^^ [rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight ^^^^^^ [rank8]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank8]: raise CheckpointingException(_msg) [rank8]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank8]: Traceback (most recent call last): (RANK 6) [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank8]: local_data = map_fun() [rank8]: ^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank31]: Traceback (most recent call last): (RANK 17) [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank31]: local_data = map_fun() [rank31]: ^^^^^^^^^ [rank31]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank31]: result = func(*args, **kwargs) [rank31]: ^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank31]: local_plan = planner.create_local_plan() [rank31]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank22]: local_plan = planner.create_local_plan() [rank22]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank22]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank22]: raise CheckpointingException(_msg) [rank22]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank1]: result = func(*args, **kwargs) [rank1]: ^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank1]: local_plan = planner.create_local_plan() [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank1]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank1]: raise CheckpointingException(_msg) [rank8]: result = func(*args, **kwargs) [rank8]: ^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank8]: local_plan = planner.create_local_plan() [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank8]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank8]: raise CheckpointingException(_msg) [rank31]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", 
line 576, in _validate_global_shapes [rank31]: raise CheckpointingException(_msg) [rank31]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank31]: Traceback (most recent call last): (RANK 18) [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank31]: local_data = map_fun() [rank31]: ^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank22]: Traceback (most recent call last): (RANK 25) [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank22]: local_data = map_fun() [rank22]: ^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank22]: result = func(*args, **kwargs) [rank22]: ^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank22]: local_plan = planner.create_local_plan() [rank22]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank1]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank1]: Traceback (most recent call last): (RANK 28) [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank1]: local_data = map_fun() [rank1]: ^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank1]: result = func(*args, **kwargs) [rank1]: ^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank1]: local_plan = planner.create_local_plan() [rank8]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank8]: Traceback (most recent call last): (RANK 7) [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank8]: local_data = map_fun() [rank8]: ^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank8]: result = func(*args, **kwargs) [rank8]: ^^^^^^^^^^^^^^^^^^^^^ [rank8]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank8]: local_plan = planner.create_local_plan() [rank31]: result = func(*args, **kwargs) [rank31]: ^^^^^^^^^^^^^^^^^^^^^ [rank22]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank22]: raise CheckpointingException(_msg) [rank22]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank22]: Traceback (most recent call last): (RANK 26) [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank22]: local_data = map_fun() [rank22]: ^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank1]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank1]: raise CheckpointingException(_msg) [rank1]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank1]: Traceback (most recent call last): (RANK 29) [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank1]: local_data = map_fun() [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank8]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank8]: raise CheckpointingException(_msg) [rank8]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank8]: Traceback (most recent call last): (RANK 8) [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank8]: local_data = map_fun() [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank31]: local_plan = planner.create_local_plan() [rank31]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank31]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank31]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank31]: raise CheckpointingException(_msg) [rank31]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torcda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank22]: result = func(*args, **kwargs) [rank22]: ^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank22]: local_plan = planner.create_local_plan() [rank22]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank22]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank22]: raise CheckpointingException(_msg) [rank1]: ^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank1]: result = func(*args, **kwargs) [rank1]: ^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank1]: local_plan = planner.create_local_plan() [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank1]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank8]: ^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank8]: result = func(*args, **kwargs) [rank27]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank27]: raise CheckpointingException(_msg) [rank27]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank27]: Traceback (most recent call last): (RANK 22) [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank27]: local_data = map_fun() [rank27]: ^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", 
line 87, in wrapper [rank22]: megatron.core.dist_checkpointing.core.CheckpointingException1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank1]: raise CheckpointingException(_msg) [rank27]: result = func(*args, **kwargs) [rank27]: ^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank27]: local_plan = planner.create_local_plan() [rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank27]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank27]: raise CheckpointingException(_msg) [rank21]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank21]: raise CheckpointingException(_msg) [rank21]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank21]: Traceback (most recent call last): (RANK 30) [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank21]: local_data = map_fun() [rank21]: ^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank1]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank1]: Traceback (most recent call last): (RANK 30) [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank1]: local_data = map_fun() [rank1]: ^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank1]: result = func(*args, **kwargs) [rank1]: ^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank1]: local_plan = planner.create_local_plan() [rank27]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank27]: Traceback (most recent call last): (RANK 23) [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank27]: local_data = map_fun() [rank27]: ^^^^^^^^^ [rank27]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank27]: result = func(*args, **kwargs) [rank27]: ^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank27]: local_plan = planner.create_local_plan() [rank21]: result = func(*args, **kwargs) [rank21]: ^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank21]: local_plan = planner.create_local_plan() [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank21]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank21]: raise CheckpointingException(_msg) [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank1]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank1]: raise CheckpointingException(_msg) [rank1]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank1]: Traceback (most recent call last): (RANK 31) [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank1]: local_data = map_fun() [rank8]: ^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank8]: local_plan = planner.create_local_plan() [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank8]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank8]: raise CheckpointingException(_msg) [rank8]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank21]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key 
embedding.position_embeddings.weight [rank21]: Traceback (most recent call last): (RANK 31) [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank21]: local_data = map_fun() [rank21]: ^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank21]: result = func(*args, **kwargs) [rank21]: ^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank21]: local_plan = planner.create_local_plan() [rank1]: ^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank1]: result = func(*args, **kwargs) [rank1]: ^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank1]: local_plan = planner.create_local_plan() [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank1]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank8]: Traceback (most recent call last): (RANK 9) [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank8]: local_data = map_fun() [rank8]: ^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank8]: result = func(*args, **kwargs) [rank8]: ^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank8]: local_plan = planner.create_local_plan() [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank27]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank27]: raise CheckpointingException(_msg) [rank27]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank27]: Traceback (most recent call last): (RANK 24) [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank27]: local_data = map_fun() [rank27]: ^^^^^^^^^ [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank21]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank21]: raise CheckpointingException(_msg) [rank21]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight rver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank17]: result = func(*args, **kwargs) [rank17]: ^^^^^^^^^^^^^^^^^^^^^ [rank1]: raise CheckpointingException(_msg) [rank1]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight ^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank5]: result = func(*args, **kwargs) [rank5]: ^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank5]: local_plan = planner.create_local_plan() [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank8]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank8]: raise CheckpointingException(_msg) [rank8]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank8]: Traceback (most recent call last): (RANK 10) [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank8]: local_data = map_fun() [rank8]: ^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnse4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank24]: raise CheckpointingException(_msg) [rank24]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank24]: Traceback (most recent call last): (RANK 30) [rank24]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank24]: local_data = map_fun() [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank17]: local_plan = planner.create_local_plan() [rank17]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank8]: result = func(*args, **kwargs) [rank8]: ^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank8]: local_plan = planner.create_local_plan() [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank8]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank8]: raise CheckpointingException(_msg) [rank24]: ^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: result = func(*args, **kwargs) [rank24]: ^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank24]: local_plan = planner.create_local_plan() [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank17]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank17]: raise CheckpointingException(_msg) [rank17]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank17]: Traceback (most recent call last): (RANK 25) [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank17]: local_data = map_fun() [rank17]: ^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank5]: raise CheckpointingException(_msg) [rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded 
(torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank5]: Traceback (most recent call last): (RANK 28) [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank5]: local_data = map_fun() [rank5]: ^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank5]: result = func(*args, **kwargs) [rank5]: ^^^^^^^^^^^^^^^^^^^^^ [rank8]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank24]: raise CheckpointingException(_msg) [rank24]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank24]: Traceback (most recent call last): (RANK 31) [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank24]: local_data = map_fun() [rank24]: ^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: result = func(*args, **kwargs) [rank24]: ^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank17]: result = func(*args, **kwargs) [rank17]: ^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank17]: local_plan = planner.create_local_plan() [rank17]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank17]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank17]: raise CheckpointingException(_msg) [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank5]: local_plan = planner.create_local_plan() [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank5]: raise CheckpointingException(_msg) [rank5]: 
Checkpoint loading fails on every rank with the same CheckpointingException. In this portion of the log the identical traceback is emitted, interleaved, by ranks 5, 6, 7, 8, 16, 17, 19, 22, 24, 27, and 31, each repeating it once per reported shard coordinate (RANK 11 through RANK 31). One representative copy, from rank 8:

[rank8]: Traceback (most recent call last):
[rank8]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter
[rank8]:     local_data = map_fun()
[rank8]:                  ^^^^^^^^^
[rank8]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper
[rank8]:     result = func(*args, **kwargs)
[rank8]:              ^^^^^^^^^^^^^^^^^^^^^
[rank8]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step
[rank8]:     local_plan = planner.create_local_plan()
[rank8]:                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^
[rank8]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan
[rank8]:     self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors)
[rank8]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes
[rank8]:     raise CheckpointingException(_msg)
[rank8]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight
megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank31]: Traceback (most recent call last): (RANK 26) [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank31]: local_data = map_fun() [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank22]: result = func(*args, **kwargs) [rank22]: ^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank22]: local_plan = planner.create_local_plan() [rank22]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank22]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank22]: raise CheckpointingException(_msg) [rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank7]: raise CheckpointingException(_msg) [rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank7]: Traceback (most recent call last): (RANK 27) [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank7]: local_data = map_fun() [rank7]: ^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank8]: ^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank8]: local_plan = planner.create_local_plan() [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank8]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank8]: raise CheckpointingException(_msg) [rank8]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank31]: ^^^^^^^^^ [rank22]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key 
embedding.position_embeddings.weight [rank22]: Traceback (most recent call last): (RANK 31) [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank22]: local_data = map_fun() [rank22]: ^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank22]: result = func(*args, **kwargs) [rank22]: ^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank22]: local_plan = planner.create_local_plan() [rank7]: result = func(*args, **kwargs) [rank7]: ^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank7]: local_plan = planner.create_local_plan() [rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank7]: raise CheckpointingException(_msg) [rank8]: Traceback (most recent call last): (RANK 25) [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank8]: local_data = map_fun() [rank8]: ^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank8]: result = func(*args, **kwargs) [rank8]: ^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank8]: local_plan = planner.create_local_plan() [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank22]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank22]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank22]: raise CheckpointingException(_msg) [rank22]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight 7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank17]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape 
mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank7]: Traceback (most recent call last): (RANK 28) [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank7]: local_data = map_fun() [rank7]: ^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank7]: result = func(*args, **kwargs) [rank7]: ^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank7]: local_plan = planner.create_local_plan() [rank8]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank8]: raise CheckpointingException(_msg) [rank8]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank8]: Traceback (most recent call last): (RANK 26) [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank8]: local_data = map_fun() [rank8]: ^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank17]: raise CheckpointingException(_msg) [rank17]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank17]: Traceback (most recent call last): (RANK 30) [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank17]: local_data = map_fun() [rank17]: ^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank17]: result = func(*args, **kwargs) [rank17]: ^^^^^^^^^^^^^^^^^^^^^ [rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank8]: result = func(*args, **kwargs) [rank8]: ^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank8]: local_plan = planner.create_local_plan() [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank8]: self._validate_global_shapes(self.metadata, 
self.shapes_validation_sharded_tensors) [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank8]: raise CheckpointingException(_msg) [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank31]: result = func(*args, **kwargs) [rank31]: ^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank31]: local_plan = planner.create_local_plan() [rank31]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank31]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank31]: raise CheckpointingException(_msg) [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank17]: local_plan = planner.create_local_plan() [rank17]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank17]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank17]: raise CheckpointingException(_msg) [rank17]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank7]: raise CheckpointingException(_msg) [rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank7]: Traceback (most recent call last): (RANK 29) [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank7]: local_data = map_fun() [rank7]: ^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank8]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank31]: megatron.core.dist_checkpointing.core.CheckpointingException7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank27]: 
self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank27]: raise CheckpointingException(_msg) [rank27]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank27]: Traceback (most recent call last): (RANK 30) [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank27]: local_data = map_fun() [rank17]: Traceback (most recent call last): (RANK 31) [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank17]: local_data = map_fun() [rank17]: ^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank17]: result = func(*args, **kwargs) [rank17]: ^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank17]: local_plan = planner.create_local_plan() [rank17]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank7]: result = func(*args, **kwargs) [rank7]: ^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank7]: local_plan = planner.create_local_plan() [rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank7]: raise CheckpointingException(_msg) [rank8]: Traceback (most recent call last): (RANK 27) [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank8]: local_data = map_fun() [rank8]: ^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank8]: result = func(*args, **kwargs) [rank8]: ^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank8]: local_plan = planner.create_local_plan() [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank27]: ^^^^^^^^^ [rank27]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank27]: result = func(*args, **kwargs) [rank27]: ^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank27]: local_plan = planner.create_local_plan() [rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank27]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank17]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank17]: raise CheckpointingException(_msg) [rank17]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank7]: Traceback (most recent call last): (RANK 30) [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank7]: local_data = map_fun() [rank7]: ^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank7]: result = func(*args, **kwargs) [rank7]: ^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank7]: local_plan = planner.create_local_plan() [rank8]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank8]: raise CheckpointingException(_msg) [rank8]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank8]: Traceback (most recent call last): (RANK 28) [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank8]: local_data = map_fun() [rank8]: ^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank27]: raise CheckpointingException(_msg) [rank27]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor 
for key embedding.position_embeddings.weight [rank27]: Traceback (most recent call last): (RANK 31) [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank27]: local_data = map_fun() [rank27]: ^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank27]: result = func(*args, **kwargs) [rank27]: ^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank7]: raise CheckpointingException(_msg) [rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank7]: Traceback (most recent call last): (RANK 31) [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank7]: local_data = map_fun() [rank8]: result = func(*args, **kwargs) [rank8]: ^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank8]: local_plan = planner.create_local_plan() [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank8]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank8]: raise CheckpointingException(_msg) [rank27]: local_plan = planner.create_local_plan() [rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank27]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank27]: raise CheckpointingException(_msg) [rank27]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight : Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank31]: Traceback (most recent call last): (RANK 27) [rank7]: ^^^^^^^^^ [rank7]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank7]: result = func(*args, **kwargs) [rank8]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank8]: Traceback (most recent call last): (RANK 29) [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank8]: local_data = map_fun() [rank8]: ^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank8]: result = func(*args, **kwargs) [rank8]: ^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank8]: local_plan = planner.create_local_plan() [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank31]: local_data = map_fun() [rank31]: ^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank31]: result = func(*args, **kwargs) [rank31]: ^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank31]: local_plan = planner.create_local_plan() [rank31]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank7]: ^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank7]: local_plan = planner.create_local_plan() [rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank7]: raise CheckpointingException(_msg) [rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank31]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank31]: raise CheckpointingException(_msg) [rank31]: 
megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank31]: Traceback (most recent call last): (RANK 28) [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank31]: local_data = map_fun() [rank31]: ^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank3]:[W621 21:13:15.574865779 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank8]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank8]: raise CheckpointingException(_msg) [rank8]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank8]: Traceback (most recent call last): (RANK 30) [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank8]: local_data = map_fun() [rank8]: ^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank31]: result = func(*args, **kwargs) [rank31]: ^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank31]: local_plan = planner.create_local_plan() [rank31]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank31]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank31]: raise CheckpointingException(_msg) [rank4]:[W621 21:13:15.576409316 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. 
For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank8]: result = func(*args, **kwargs) [rank8]: ^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank8]: local_plan = planner.create_local_plan() [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank8]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank8]: raise CheckpointingException(_msg) [rank31]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank31]: Traceback (most recent call last): (RANK 29) [rank2]:[W621 21:13:15.657768840 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank8]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank8]: Traceback (most recent call last): (RANK 31) [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank8]: local_data = map_fun() [rank8]: ^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank8]: result = func(*args, **kwargs) [rank8]: ^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank8]: local_plan = planner.create_local_plan() [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank31]: local_data = map_fun() [rank31]: ^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank31]: result = func(*args, **kwargs) [rank31]: ^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank31]: local_plan = planner.create_local_plan() [rank31]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank8]: self._validate_global_shapes(self.metadata, 
self.shapes_validation_sharded_tensors) [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank8]: raise CheckpointingException(_msg) [rank8]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank31]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank31]: raise CheckpointingException(_msg) [rank31]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank31]: Traceback (most recent call last): (RANK 30) [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank31]: local_data = map_fun() [rank31]: ^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank31]: result = func(*args, **kwargs) [rank31]: ^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank31]: local_plan = planner.create_local_plan() [rank31]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank31]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank31]: raise CheckpointingException(_msg) [rank31]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank31]: Traceback (most recent call last): (RANK 31) [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank31]: local_data = map_fun() [rank31]: ^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank31]: result = func(*args, **kwargs) [rank31]: ^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank31]: local_plan = planner.create_local_plan() [rank31]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank31]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank31]: File 
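The exception above points at a configuration mismatch rather than a corrupt checkpoint: the checkpoint stores embedding.position_embeddings.weight with global shape [49152, 4096], while the current run builds a position-embedding table for 16384 positions (for example, a smaller --max-position-embeddings / sequence length than the one the checkpoint was saved with). A minimal diagnostic sketch, assuming a recent PyTorch where torch.distributed.checkpoint.FileSystemReader exposes read_metadata(); the checkpoint directory below is a placeholder and the tensor key is copied from the error message (the key naming inside the saved metadata may differ):

# diagnostic sketch -- not part of the training run above
import torch.distributed.checkpoint as dcp

CKPT_DIR = "/path/to/checkpoint/iter_0000000"  # placeholder: use the directory passed via --load

reader = dcp.FileSystemReader(CKPT_DIR)
metadata = reader.read_metadata()

key = "embedding.position_embeddings.weight"  # key copied from the CheckpointingException
tensor_meta = metadata.state_dict_metadata.get(key)
if tensor_meta is not None:
    # For this failure the stored global size should be torch.Size([49152, 4096]),
    # while the current model config expects (16384, 4096).
    print(key, tensor_meta.size)
else:
    # Fall back to listing keys if the stored naming differs from the error message.
    print("\n".join(sorted(metadata.state_dict_metadata)))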
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank31]: raise CheckpointingException(_msg) [rank31]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([49152, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight [rank26]:[W621 21:13:15.129415405 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank7]:[W621 21:13:15.738062630 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank5]:[W621 21:13:15.739193277 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank0]:[W621 21:13:15.754545590 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank9]:[W621 21:13:15.665101510 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank6]:[W621 21:13:15.802645342 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank23]:[W621 21:13:15.131564100 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank13]:[W621 21:13:15.726562496 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank20]:[W621 21:13:15.147260403 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank29]:[W621 21:13:15.257745568 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank21]:[W621 21:13:15.179239724 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank31]:[W621 21:13:15.260581437 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. 
For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank25]:[W621 21:13:15.267430813 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank18]:[W621 21:13:15.202001994 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank28]:[W621 21:13:15.276438969 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank14]:[W621 21:13:15.756727985 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank15]:[W621 21:13:15.779534674 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank10]:[W621 21:13:15.779738547 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank27]:[W621 21:13:15.316519302 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank19]:[W621 21:13:15.247888854 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank1]:[W621 21:13:15.917933264 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank8]:[W621 21:13:15.813306687 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank24]:[W621 21:13:15.334404742 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank12]:[W621 21:13:15.815814256 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank30]:[W621 21:13:15.339369796 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. 
For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank17]:[W621 21:13:15.269828952 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank22]:[W621 21:13:15.270483044 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank16]:[W621 21:13:15.275914763 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank11]:[W621 21:13:15.825573081 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) W0621 21:13:16.473000 132761 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 132833 closing signal SIGTERM W0621 21:13:16.483000 132761 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 132834 closing signal SIGTERM W0621 21:13:16.484000 132761 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 132835 closing signal SIGTERM W0621 21:13:16.484000 132761 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 132837 closing signal SIGTERM W0621 21:13:16.484000 132761 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 132838 closing signal SIGTERM W0621 21:13:16.484000 132761 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 132839 closing signal SIGTERM W0621 21:13:16.485000 132761 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 132840 closing signal SIGTERM W0621 21:13:16.872000 2052241 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2052310 closing signal SIGTERM W0621 21:13:16.872000 2052241 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2052312 closing signal SIGTERM W0621 21:13:16.873000 2052241 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2052313 closing signal SIGTERM W0621 21:13:16.873000 2052241 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2052314 closing signal SIGTERM W0621 21:13:16.874000 2052241 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2052315 closing signal SIGTERM W0621 21:13:16.874000 2052241 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2052316 closing signal SIGTERM W0621 21:13:16.874000 2052241 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2052317 closing signal SIGTERM W0621 21:13:17.031000 3352950 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3353019 closing signal SIGTERM W0621 21:13:17.032000 3352950 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3353020 closing signal SIGTERM W0621 21:13:17.033000 3352950 
site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3353021 closing signal SIGTERM W0621 21:13:17.033000 3352950 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3353022 closing signal SIGTERM W0621 21:13:17.033000 3352950 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3353023 closing signal SIGTERM W0621 21:13:17.033000 3352950 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3353024 closing signal SIGTERM W0621 21:13:17.034000 3352950 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3353025 closing signal SIGTERM W0621 21:13:17.102000 3421622 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3421692 closing signal SIGTERM W0621 21:13:17.103000 3421622 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3421693 closing signal SIGTERM W0621 21:13:17.103000 3421622 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3421695 closing signal SIGTERM E0621 21:13:17.113000 132761 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 3 (pid: 132836) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 W0621 21:13:17.104000 3421622 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3421696 closing signal SIGTERM W0621 21:13:17.104000 3421622 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3421698 closing signal SIGTERM W0621 21:13:17.104000 3421622 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3421699 closing signal SIGTERM Traceback (most recent call last): File "", line 198, in _run_module_as_main File "", line 88, in _run_code File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in main() File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper return arg(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main launch(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch run(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run elastic_launch( File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__ return launch_agent(self._config, self._entrypoint, list(args)) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent raise ChildFailedError( torch.distributed.elastic.multiprocessing.errors.ChildFailedError: ============================================================ ./pretrain_gpt_profile.py FAILED ------------------------------------------------------------ Failures: ------------------------------------------------------------ Root Cause (first observed failure): [0]: time : 2025-06-21_21:13:16 host : 
fs-mbz-gpu-852 rank : 3 (local_rank: 3) exitcode : 1 (pid: 132836) error_file: traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html ============================================================ + set +x E0621 21:13:17.489000 2052241 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 1 (pid: 2052311) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 W0621 21:13:17.498000 2052241 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-901_2052241_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. [W621 21:13:17.938248004 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-901]:33792, remote=[fs-mbz-gpu-852]:29500): Broken pipe Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x1480f3f785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x1480dce5aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa358 (0x1480dce5c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5babb3e (0x1480dce5db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: c10d::TCPStore::doWait(c10::ArrayRef, std::allocator > >, std::chrono::duration >) + 0x1a6 (0x1480dce57ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string, std::allocator > const&) + 0x33 (0x1480dce57ea3 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #6: c10d::TCPStore::get(std::__cxx11::basic_string, std::allocator > const&) + 0xab (0x1480dce58f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #7: + 0xc0f526 (0x1480ec18b526 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #8: + 0x37f17d (0x1480eb8fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #26: + 0x29d90 (0x1480f4fccd90 in /lib/x86_64-linux-gnu/libc.so.6) frame #27: __libc_start_main + 0x80 (0x1480f4fcce40 in /lib/x86_64-linux-gnu/libc.so.6) W0621 21:13:17.510000 2052241 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-901_2052241_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. 
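The "destroy_process_group() was not called before program exit" warnings at the top of this excerpt are emitted by ProcessGroupNCCL because the workers exit without tearing down the default process group. A minimal sketch of the cleanup the warning asks for, assuming a generic torch.distributed entrypoint rather than the actual pretrain_gpt_profile.py:

# Minimal sketch, not the real pretrain_gpt_profile.py: tear down the default
# process group before the interpreter exits so NCCL resources are released.
import os
import torch
import torch.distributed as dist

def main() -> None:
    local_rank = int(os.environ["LOCAL_RANK"])
    torch.cuda.set_device(local_rank)
    dist.init_process_group(backend="nccl")
    try:
        pass  # training / profiling work would go here
    finally:
        dist.destroy_process_group()  # silences the shutdown warning seen above

if __name__ == "__main__":
    main()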
[W621 21:13:17.949056686 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-901]:33792, remote=[fs-mbz-gpu-852]:29500): Broken pipe Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x1480f3f785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x1480dce5aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa358 (0x1480dce5c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5babb3e (0x1480dce5db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: c10d::TCPStore::doWait(c10::ArrayRef, std::allocator > >, std::chrono::duration >) + 0x1a6 (0x1480dce57ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string, std::allocator > const&) + 0x33 (0x1480dce57ea3 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #6: c10d::TCPStore::get(std::__cxx11::basic_string, std::allocator > const&) + 0xab (0x1480dce58f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #7: + 0xc0f526 (0x1480ec18b526 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #8: + 0x37f17d (0x1480eb8fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #26: + 0x29d90 (0x1480f4fccd90 in /lib/x86_64-linux-gnu/libc.so.6) frame #27: __libc_start_main + 0x80 (0x1480f4fcce40 in /lib/x86_64-linux-gnu/libc.so.6) W0621 21:13:17.520000 2052241 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-901_2052241_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. 
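The repeated "Broken pipe" traces above come from the c10d TCPStore client: every node rendezvouses through the store served at the --rdzv_endpoint (fs-mbz-gpu-852:29500), so once the agent on fs-mbz-gpu-852 exits, the remaining nodes can neither reach the store nor shut down rendezvous '343200', hence the RendezvousConnectionError warnings. A small, hypothetical connectivity check (plain sockets, not part of the training scripts) that could be run from any of the surviving nodes:

# Hypothetical diagnostic, not part of the job: check whether the rendezvous
# endpoint used above (fs-mbz-gpu-852:29500) still accepts TCP connections.
import socket

def rendezvous_reachable(host: str = "fs-mbz-gpu-852", port: int = 29500) -> bool:
    try:
        with socket.create_connection((host, port), timeout=5):
            return True
    except OSError as exc:  # ConnectionRefusedError, timeout, DNS failure, ...
        print(f"cannot reach {host}:{port}: {exc}")
        return False

if __name__ == "__main__":
    print(rendezvous_reachable())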
Traceback (most recent call last): File "", line 198, in _run_module_as_main File "", line 88, in _run_code File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in main() File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper return arg(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main launch(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch run(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run elastic_launch( File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__ return launch_agent(self._config, self._entrypoint, list(args)) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent raise ChildFailedError( torch.distributed.elastic.multiprocessing.errors.ChildFailedError: ============================================================ ./pretrain_gpt_profile.py FAILED ------------------------------------------------------------ Failures: ------------------------------------------------------------ Root Cause (first observed failure): [0]: time : 2025-06-21_21:13:16 host : fs-mbz-gpu-901 rank : 25 (local_rank: 1) exitcode : 1 (pid: 2052311) error_file: traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html ============================================================ E0621 21:13:17.599000 3352950 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 7 (pid: 3353026) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 [W621 21:13:17.978049827 TCPStore.cpp:115] [c10d] recvVector failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-881]:42672, remote=[fs-mbz-gpu-852]:29500): failed to recv, got 0 bytes Exception raised from recvBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:678 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x1471289785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x14711185aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa0d0 (0x14711185c0d0 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5baa81d (0x14711185c81d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: + 0x5bab4a9 (0x14711185d4a9 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: c10d::TCPStore::compareSet(std::__cxx11::basic_string, std::allocator > const&, std::vector > const&, std::vector > const&) + 0x1fb (0x1471118574cb in 
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #6: + 0xc0f919 (0x147120b8b919 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #7: + 0x37f17d (0x1471202fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #25: + 0x29d90 (0x147129a04d90 in /lib/x86_64-linux-gnu/libc.so.6) frame #26: __libc_start_main + 0x80 (0x147129a04e40 in /lib/x86_64-linux-gnu/libc.so.6) E0621 21:13:17.619000 3421622 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 2 (pid: 3421694) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 W0621 21:13:17.618000 3352950 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-881_3352950_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. [W621 21:13:17.990005307 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-881]:42672, remote=[fs-mbz-gpu-852]:29500): Broken pipe Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x1471289785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x14711185aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa358 (0x14711185c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5babb3e (0x14711185db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: c10d::TCPStore::compareSet(std::__cxx11::basic_string, std::allocator > const&, std::vector > const&, std::vector > const&) + 0x299 (0x147111857569 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: + 0xc0f919 (0x147120b8b919 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #6: + 0x37f17d (0x1471202fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #24: + 0x29d90 (0x147129a04d90 in /lib/x86_64-linux-gnu/libc.so.6) frame #25: __libc_start_main + 0x80 (0x147129a04e40 in /lib/x86_64-linux-gnu/libc.so.6) W0621 21:13:17.628000 3352950 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-881_3352950_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. 
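Each ChildFailedError summary above shows an empty error_file and points to the elastic errors documentation. torch.distributed.elastic only records a worker's Python traceback when the entrypoint is wrapped with its record decorator; a minimal sketch with an assumed generic main(), not the real script:

# Sketch of the error-propagation hook the ChildFailedError message refers to;
# the decorated function's traceback is written to the error_file that the
# summaries above currently show as empty.
from torch.distributed.elastic.multiprocessing.errors import record

@record
def main() -> None:
    raise RuntimeError("worker failure is now recorded with a full traceback")

if __name__ == "__main__":
    main()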
[W621 21:13:17.545411775 TCPStore.cpp:115] [c10d] recvVector failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-870]:37208, remote=[fs-mbz-gpu-852]:29500): failed to recv, got 0 bytes Exception raised from recvBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:678 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x14dfb55785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x14df9e45aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa0d0 (0x14df9e45c0d0 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) [W621 21:13:17.998600770 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-881]:42672, remote=[fs-mbz-gpu-852]:29500): Broken pipe Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x1471289785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x14711185aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa358 (0x14711185c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5baa81d (0x14df9e45c81d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: + 0x5bab4a9 (0x14df9e45d4a9 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: c10d::TCPStore::compareSet(std::__cxx11::basic_string, std::allocator > const&, std::vector > const&, std::vector > const&) + 0x1fb (0x14df9e4574cb in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #6: + 0xc0f919 (0x14dfad78b919 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #3: + 0x5babb3e (0x14711185db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: c10d::TCPStore::compareSet(std::__cxx11::basic_string, std::allocator > const&, std::vector > const&, std::vector > const&) + 0x299 (0x147111857569 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: + 0xc0f919 (0x147120b8b919 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #6: + 0x37f17d (0x1471202fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #7: + 0x37f17d (0x14dfacefb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #25: + 0x29d90 (0x14dfb653bd90 in /lib/x86_64-linux-gnu/libc.so.6) frame #26: __libc_start_main + 0x80 (0x14dfb653be40 in /lib/x86_64-linux-gnu/libc.so.6) frame #24: + 0x29d90 (0x147129a04d90 
in /lib/x86_64-linux-gnu/libc.so.6) frame #25: __libc_start_main + 0x80 (0x147129a04e40 in /lib/x86_64-linux-gnu/libc.so.6) W0621 21:13:17.638000 3421622 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-870_3421622_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. W0621 21:13:17.636000 3352950 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-881_3352950_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. [W621 21:13:17.557352540 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-870]:37208, remote=[fs-mbz-gpu-852]:29500): Broken pipe Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x14dfb55785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x14df9e45aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa358 (0x14df9e45c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) Traceback (most recent call last): File "", line 198, in _run_module_as_main frame #3: + 0x5babb3e (0x14df9e45db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: c10d::TCPStore::compareSet(std::__cxx11::basic_string, std::allocator > const&, std::vector > const&, std::vector > const&) + 0x299 (0x14df9e457569 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: + 0xc0f919 (0x14dfad78b919 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #6: + 0x37f17d (0x14dfacefb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) File "", line 88, in _run_code File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in frame #24: + 0x29d90 (0x14dfb653bd90 in /lib/x86_64-linux-gnu/libc.so.6) frame #25: __libc_start_main + 0x80 (0x14dfb653be40 in /lib/x86_64-linux-gnu/libc.so.6) main() W0621 21:13:17.648000 3421622 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-870_3421622_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. 
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper [W621 21:13:17.566439111 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-870]:37208, remote=[fs-mbz-gpu-852]:29500): Broken pipe Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x14dfb55785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x14df9e45aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa358 (0x14df9e45c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) return arg(*args, **kwargs) frame #3: + 0x5babb3e (0x14df9e45db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: c10d::TCPStore::compareSet(std::__cxx11::basic_string, std::allocator > const&, std::vector > const&, std::vector > const&) + 0x299 (0x14df9e457569 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: + 0xc0f919 (0x14dfad78b919 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #6: + 0x37f17d (0x14dfacefb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) ^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main frame #24: + 0x29d90 (0x14dfb653bd90 in /lib/x86_64-linux-gnu/libc.so.6) frame #25: __libc_start_main + 0x80 (0x14dfb653be40 in /lib/x86_64-linux-gnu/libc.so.6) launch(args) W0621 21:13:17.657000 3421622 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-870_3421622_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. 
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch Traceback (most recent call last): run(args) File "", line 198, in _run_module_as_main File "", line 88, in _run_code File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in elastic_launch( File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__ main() return launch_agent(self._config, self._entrypoint, list(args)) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent return arg(*args, **kwargs) raise ChildFailedError( torch.distributed.elastic.multiprocessing.errors.ChildFailedError: ============================================================ ./pretrain_gpt_profile.py FAILED ------------------------------------------------------------ Failures: ------------------------------------------------------------ Root Cause (first observed failure): [0]: time : 2025-06-21_21:13:17 host : fs-mbz-gpu-881 rank : 23 (local_rank: 7) exitcode : 1 (pid: 3353026) error_file: traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html ^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main ============================================================ launch(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch run(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run elastic_launch( File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__ return launch_agent(self._config, self._entrypoint, list(args)) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent raise ChildFailedError( torch.distributed.elastic.multiprocessing.errors.ChildFailedError: ============================================================ ./pretrain_gpt_profile.py FAILED ------------------------------------------------------------ Failures: [1]: time : 2025-06-21_21:13:17 host : fs-mbz-gpu-870 rank : 13 (local_rank: 5) exitcode : 1 (pid: 3421697) error_file: traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html ------------------------------------------------------------ Root Cause (first observed failure): [0]: time : 2025-06-21_21:13:17 host : fs-mbz-gpu-870 rank : 10 (local_rank: 2) exitcode : 1 (pid: 3421694) error_file: traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html 
============================================================ + set +x + set +x + set +x + for ctx_length in 1024 2048 4096 8192 12288 16384 24576 32768 40960 49152 65536 81920 98304 131072 + export PROF_CTX_LENGTH=24576 + PROF_CTX_LENGTH=24576 + name='/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L24576*tp8.cp4.bs32.json' + '[' -f '/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L24576*tp8.cp4.bs32.json' ']' + echo 'Running ctx_length=24576, TP_SIZE=8, CP_SIZE=4, BATCH_SIZE=32' + srun bash ./attnserver.sh + which python3 + which python3 + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 1 --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 24576 --max-position-embeddings 24576 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/ + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 3 --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 24576 --max-position-embeddings 24576 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/ + which python3 + which python3 + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 0 --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 24576 --max-position-embeddings 24576 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/ + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 2 --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 24576 --max-position-embeddings 24576 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 
--lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated and will be removed in future. Use torchrun. Note that --use-env is set by default in torchrun. If your script expects `--local-rank` argument to be set, please change it to read from `os.environ['LOCAL_RANK']` instead. See https://pytorch.org/docs/stable/distributed.html#launch-utility for further instructions main() W0621 21:13:21.963000 134641 site-packages/torch/distributed/run.py:766] W0621 21:13:21.963000 134641 site-packages/torch/distributed/run.py:766] ***************************************** W0621 21:13:21.963000 134641 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. W0621 21:13:21.963000 134641 site-packages/torch/distributed/run.py:766] ***************************************** /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated and will be removed in future. Use torchrun. Note that --use-env is set by default in torchrun. If your script expects `--local-rank` argument to be set, please change it to read from `os.environ['LOCAL_RANK']` instead. See https://pytorch.org/docs/stable/distributed.html#launch-utility for further instructions main() /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated and will be removed in future. Use torchrun. Note that --use-env is set by default in torchrun. If your script expects `--local-rank` argument to be set, please change it to read from `os.environ['LOCAL_RANK']` instead. See https://pytorch.org/docs/stable/distributed.html#launch-utility for further instructions main() W0621 21:13:21.983000 2054085 site-packages/torch/distributed/run.py:766] W0621 21:13:21.983000 2054085 site-packages/torch/distributed/run.py:766] ***************************************** W0621 21:13:21.983000 2054085 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. W0621 21:13:21.983000 2054085 site-packages/torch/distributed/run.py:766] ***************************************** W0621 21:13:21.985000 3423466 site-packages/torch/distributed/run.py:766] W0621 21:13:21.985000 3423466 site-packages/torch/distributed/run.py:766] ***************************************** W0621 21:13:21.985000 3423466 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. 
W0621 21:13:21.985000 3423466 site-packages/torch/distributed/run.py:766] ***************************************** /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated and will be removed in future. Use torchrun. Note that --use-env is set by default in torchrun. If your script expects `--local-rank` argument to be set, please change it to read from `os.environ['LOCAL_RANK']` instead. See https://pytorch.org/docs/stable/distributed.html#launch-utility for further instructions main() W0621 21:13:22.011000 3354776 site-packages/torch/distributed/run.py:766] W0621 21:13:22.011000 3354776 site-packages/torch/distributed/run.py:766] ***************************************** W0621 21:13:22.011000 3354776 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. W0621 21:13:22.011000 3354776 site-packages/torch/distributed/run.py:766] ***************************************** [rank8]:[W621 21:13:46.563049488 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 8] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank16]:[W621 21:13:46.033251510 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 16] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank0]:[W621 21:13:46.739094231 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 0] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank14]:[W621 21:13:46.676269863 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 14] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank6]:[W621 21:13:46.789701550 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 6] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank22]:[W621 21:13:46.129918856 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 22] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank30]:[W621 21:13:46.220402194 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 30] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. 
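The FutureWarning above says torch.distributed.launch is deprecated in favor of torchrun, which passes the per-process GPU index through the environment instead of a --local-rank argument. A hedged sketch of the environment-based pattern the warning recommends (generic worker code, not the actual pretrain_gpt_profile.py):

# Under torchrun (and torch.distributed.launch with its default --use-env
# behaviour), the per-process indices arrive via the environment, not argv.
import os
import torch

local_rank = int(os.environ["LOCAL_RANK"])   # 0..7 on these 8-GPU nodes
world_size = int(os.environ["WORLD_SIZE"])   # 32 for this 4-node job
rank = int(os.environ["RANK"])               # global rank assigned by the launcher
torch.cuda.set_device(local_rank)
print(f"rank {rank}/{world_size} bound to cuda:{local_rank}")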
[rank24]:[W621 21:13:47.479455862 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 24] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank13]:[W621 21:13:47.973161700 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 13] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank5]:[W621 21:13:47.087031596 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 5] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank29]:[W621 21:13:47.495435029 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 29] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank21]:[W621 21:13:47.427745099 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 21] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank7]:[W621 21:13:47.088407941 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 7] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank3]:[W621 21:13:47.090840404 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 3] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank15]:[W621 21:13:47.975841609 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 15] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank31]:[W621 21:13:47.498266064 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 31] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank11]:[W621 21:13:47.978019284 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 11] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank9]:[W621 21:13:47.978085537 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 9] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. 
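The per-rank warnings above and below ask for device_id in init_process_group() so that NCCL knows the rank-to-GPU binding up front. A short sketch, assuming each rank should own cuda:LOCAL_RANK (which matches the GPU indices printed in these warnings) and a PyTorch version recent enough to accept the device_id argument:

# Bind the process group to one CUDA device explicitly, as the warning suggests.
import os
import torch
import torch.distributed as dist

local_rank = int(os.environ["LOCAL_RANK"])
device = torch.device(f"cuda:{local_rank}")
torch.cuda.set_device(device)
dist.init_process_group(backend="nccl", device_id=device)  # avoids the "GPU unknown" warning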
[rank1]:[W621 21:13:47.091587349 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 1] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank23]:[W621 21:13:47.431400795 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 23] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank19]:[W621 21:13:47.432386726 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 19] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank27]:[W621 21:13:47.500984901 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 27] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank25]:[W621 21:13:47.501307192 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 25] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank17]:[W621 21:13:47.433015411 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 17] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank20]:[W621 21:13:47.438162839 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 20] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank12]:[W621 21:13:47.986361367 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 12] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank4]:[W621 21:13:47.099963749 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 4] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank28]:[W621 21:13:47.508560013 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 28] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank10]:[W621 21:13:47.994874792 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 10] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. 
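The rank numbers in these warnings and in the earlier failure summaries follow the usual launcher layout: with --nproc_per_node 8 and --nnodes 4, the global rank is node_rank * 8 + local_rank and each process uses GPU local_rank. A tiny worked check of that arithmetic against the hosts reported above:

# Worked example of the rank layout implied by the log (8 procs/node, 4 nodes).
NPROC_PER_NODE = 8

def global_rank(node_rank: int, local_rank: int) -> int:
    return node_rank * NPROC_PER_NODE + local_rank

assert global_rank(3, 1) == 25   # fs-mbz-gpu-901, local_rank 1 in the summary above
assert global_rank(1, 5) == 13   # fs-mbz-gpu-870, local_rank 5
assert global_rank(2, 7) == 23   # fs-mbz-gpu-881, local_rank 7
print("rank 25 runs on GPU", 25 % NPROC_PER_NODE)   # GPU 1, matching the warning for rank 25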
[rank2]:[W621 21:13:47.109446658 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 2] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
[rank18]:[W621 21:13:47.449062453 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 18] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
[rank26]:[W621 21:13:47.518257237 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 26] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
  warnings.warn(
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
[rank1]: Traceback (most recent call last):
[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
[rank1]:     pretrain(
[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain
[rank1]:     iteration, num_floating_point_operations_so_far = train(
[rank1]:                                                       ^^^^^^
[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train
[rank1]:     ) = train_step(
[rank1]:         ^^^^^^^^^^^
[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step
[rank1]:     losses_reduced = forward_backward_func(
[rank1]:                      ^^^^^^^^^^^^^^^^^^^^^^
[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining
[rank1]:     output_tensor, num_tokens = forward_step(
[rank1]:                                 ^^^^^^^^^^^^^
[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step
[rank1]:     output_tensor, loss_func = forward_step_func(data_iterator, model)
[rank1]:                                ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step
[rank1]:     (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)
[rank1]:                                                                             ^^^^^^^^^^^^^^^^^^^^^^^^
[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch
[rank1]:     batch = next(global_batches)
[rank1]:             ^^^^^^^^^^^^^^^^^^^^
[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches
[rank1]:     attention_mask = torch.ones(
[rank1]:                      ^^^^^^^^^^^
[rank1]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 18432.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 136.37 GiB is free. Including non-PyTorch memory, this process has 3.43 GiB memory in use. Of the allocated memory 1.74 GiB is allocated by PyTorch, and 179.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation.
[rank1]: See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
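The size of the failed allocation is consistent with materializing a dense per-sample attention mask of shape [batch, 1, seq_len, seq_len]: such a tensor grows quadratically with sequence length, and 18432 GiB is exactly 18 TiB, i.e. 18 * (2^20)^2 one-byte elements. The batch size and sequence length of this run are not visible in this excerpt, so the factorization below is only one assumption that reproduces the reported number:

GIB = 1024 ** 3

def dense_mask_bytes(batch: int, seq_len: int, bytes_per_elem: int = 1) -> int:
    # Bytes needed for a fully materialized [batch, 1, seq_len, seq_len] mask
    # (1 byte per element for a torch.bool mask).
    return batch * seq_len * seq_len * bytes_per_elem

# Assumed shapes that reproduce the reported 18432.00 GiB request:
# 18 sequences of 1,048,576 tokens each, boolean mask.
print(dense_mask_bytes(batch=18, seq_len=1_048_576) / GIB)  # -> 18432.0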
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank5]: Traceback (most recent call last): [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank5]: pretrain( [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank5]: iteration, num_floating_point_operations_so_far = train( [rank5]: ^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank5]: ) = train_step( [rank5]: ^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank5]: losses_reduced = forward_backward_func( [rank5]: ^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank5]: output_tensor, num_tokens = forward_step( [rank5]: ^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank5]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank5]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank5]: batch = next(global_batches) [rank5]: ^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank5]: attention_mask = torch.ones( [rank5]: ^^^^^^^^^^^ [rank5]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 18432.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 136.37 GiB is free. Including non-PyTorch memory, this process has 3.43 GiB memory in use. Of the allocated memory 1.74 GiB is allocated by PyTorch, and 179.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank2]: Traceback (most recent call last): [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank2]: pretrain( [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank2]: iteration, num_floating_point_operations_so_far = train( [rank2]: ^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank2]: ) = train_step( [rank2]: ^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank2]: losses_reduced = forward_backward_func( [rank2]: ^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank2]: output_tensor, num_tokens = forward_step( [rank2]: ^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank2]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank2]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank2]: batch = next(global_batches) [rank2]: ^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank2]: attention_mask = torch.ones( [rank2]: ^^^^^^^^^^^ [rank2]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 18432.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 136.39 GiB is free. Including non-PyTorch memory, this process has 3.42 GiB memory in use. Of the allocated memory 1.74 GiB is allocated by PyTorch, and 179.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank3]: Traceback (most recent call last): [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank3]: pretrain( [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank3]: iteration, num_floating_point_operations_so_far = train( [rank3]: ^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank3]: ) = train_step( [rank3]: ^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank3]: losses_reduced = forward_backward_func( [rank3]: ^^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank3]: output_tensor, num_tokens = forward_step( [rank3]: ^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank3]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank3]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank3]: batch = next(global_batches) [rank3]: ^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank3]: attention_mask = torch.ones( [rank3]: ^^^^^^^^^^^ [rank3]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 18432.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 136.37 GiB is free. Including non-PyTorch memory, this process has 3.43 GiB memory in use. Of the allocated memory 1.74 GiB is allocated by PyTorch, and 179.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank0]: Traceback (most recent call last): [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank0]: pretrain( [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank0]: iteration, num_floating_point_operations_so_far = train( [rank0]: ^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank0]: ) = train_step( [rank0]: ^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank0]: losses_reduced = forward_backward_func( [rank0]: ^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank0]: output_tensor, num_tokens = forward_step( [rank0]: ^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank0]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank0]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank0]: batch = next(global_batches) [rank0]: ^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank0]: attention_mask = torch.ones( [rank0]: ^^^^^^^^^^^ [rank0]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 18432.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 136.39 GiB is free. Including non-PyTorch memory, this process has 3.42 GiB memory in use. Of the allocated memory 1.74 GiB is allocated by PyTorch, and 179.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank7]: Traceback (most recent call last): [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank7]: pretrain( [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank7]: iteration, num_floating_point_operations_so_far = train( [rank7]: ^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank7]: ) = train_step( [rank7]: ^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank7]: losses_reduced = forward_backward_func( [rank7]: ^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank7]: output_tensor, num_tokens = forward_step( [rank7]: ^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank7]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank7]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank7]: batch = next(global_batches) [rank7]: ^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank7]: attention_mask = torch.ones( [rank7]: ^^^^^^^^^^^ [rank7]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 18432.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 136.37 GiB is free. Including non-PyTorch memory, this process has 3.43 GiB memory in use. Of the allocated memory 1.74 GiB is allocated by PyTorch, and 179.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank6]: Traceback (most recent call last): [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank6]: pretrain( [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank6]: iteration, num_floating_point_operations_so_far = train( [rank6]: ^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank6]: ) = train_step( [rank6]: ^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank6]: losses_reduced = forward_backward_func( [rank6]: ^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank6]: output_tensor, num_tokens = forward_step( [rank6]: ^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank6]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank6]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank6]: batch = next(global_batches) [rank6]: ^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank6]: attention_mask = torch.ones( [rank6]: ^^^^^^^^^^^ [rank6]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 18432.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 136.39 GiB is free. Including non-PyTorch memory, this process has 3.42 GiB memory in use. Of the allocated memory 1.74 GiB is allocated by PyTorch, and 179.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank4]: Traceback (most recent call last): [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank4]: pretrain( [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank4]: iteration, num_floating_point_operations_so_far = train( [rank4]: ^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank4]: ) = train_step( [rank4]: ^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank4]: losses_reduced = forward_backward_func( [rank4]: ^^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank4]: output_tensor, num_tokens = forward_step( [rank4]: ^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank4]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank4]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank4]: batch = next(global_batches) [rank4]: ^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank4]: attention_mask = torch.ones( [rank4]: ^^^^^^^^^^^ [rank4]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 18432.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 136.39 GiB is free. Including non-PyTorch memory, this process has 3.42 GiB memory in use. Of the allocated memory 1.74 GiB is allocated by PyTorch, and 179.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank21]: Traceback (most recent call last): [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank21]: pretrain( [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank21]: iteration, num_floating_point_operations_so_far = train( [rank21]: ^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank21]: ) = train_step( [rank21]: ^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank21]: losses_reduced = forward_backward_func( [rank21]: ^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank21]: output_tensor, num_tokens = forward_step( [rank21]: ^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank21]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank21]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank21]: batch = next(global_batches) [rank21]: ^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank21]: attention_mask = torch.ones( [rank21]: ^^^^^^^^^^^ [rank21]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 18432.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 136.35 GiB is free. Including non-PyTorch memory, this process has 3.45 GiB memory in use. Of the allocated memory 1.74 GiB is allocated by PyTorch, and 179.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank18]: Traceback (most recent call last): [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank18]: pretrain( [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank18]: iteration, num_floating_point_operations_so_far = train( [rank18]: ^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank18]: ) = train_step( [rank18]: ^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank18]: losses_reduced = forward_backward_func( [rank18]: ^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank18]: output_tensor, num_tokens = forward_step( [rank18]: ^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank18]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank18]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank18]: batch = next(global_batches) [rank18]: ^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank18]: attention_mask = torch.ones( [rank18]: ^^^^^^^^^^^ [rank18]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 18432.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 136.37 GiB is free. Including non-PyTorch memory, this process has 3.44 GiB memory in use. Of the allocated memory 1.74 GiB is allocated by PyTorch, and 179.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank16]: Traceback (most recent call last): [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank16]: pretrain( [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank16]: iteration, num_floating_point_operations_so_far = train( [rank16]: ^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank16]: ) = train_step( [rank16]: ^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank16]: losses_reduced = forward_backward_func( [rank16]: ^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank16]: output_tensor, num_tokens = forward_step( [rank16]: ^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank16]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank16]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank16]: batch = next(global_batches) [rank16]: ^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank16]: attention_mask = torch.ones( [rank16]: ^^^^^^^^^^^ [rank16]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 18432.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 136.37 GiB is free. Including non-PyTorch memory, this process has 3.44 GiB memory in use. Of the allocated memory 1.74 GiB is allocated by PyTorch, and 179.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank17]: Traceback (most recent call last): [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank17]: pretrain( [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank17]: iteration, num_floating_point_operations_so_far = train( [rank17]: ^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank17]: ) = train_step( [rank17]: ^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank17]: losses_reduced = forward_backward_func( [rank17]: ^^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank17]: output_tensor, num_tokens = forward_step( [rank17]: ^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank17]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank17]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank17]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank17]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank17]: batch = next(global_batches) [rank17]: ^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank17]: attention_mask = torch.ones( [rank17]: ^^^^^^^^^^^ [rank17]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 18432.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 136.35 GiB is free. Including non-PyTorch memory, this process has 3.45 GiB memory in use. Of the allocated memory 1.74 GiB is allocated by PyTorch, and 179.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank23]: Traceback (most recent call last): [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank23]: pretrain( [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank23]: iteration, num_floating_point_operations_so_far = train( [rank23]: ^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank23]: ) = train_step( [rank23]: ^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank23]: losses_reduced = forward_backward_func( [rank23]: ^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank23]: output_tensor, num_tokens = forward_step( [rank23]: ^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank23]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank23]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank23]: batch = next(global_batches) [rank23]: ^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank23]: attention_mask = torch.ones( [rank23]: ^^^^^^^^^^^ [rank23]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 18432.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 136.35 GiB is free. Including non-PyTorch memory, this process has 3.45 GiB memory in use. Of the allocated memory 1.74 GiB is allocated by PyTorch, and 179.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank11]: Traceback (most recent call last): [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank11]: pretrain( [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank11]: iteration, num_floating_point_operations_so_far = train( [rank11]: ^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank11]: ) = train_step( [rank11]: ^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank11]: losses_reduced = forward_backward_func( [rank11]: ^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank11]: output_tensor, num_tokens = forward_step( [rank11]: ^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank11]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank11]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank11]: batch = next(global_batches) [rank11]: ^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank11]: attention_mask = torch.ones( [rank11]: ^^^^^^^^^^^ [rank11]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 18432.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 136.37 GiB is free. Including non-PyTorch memory, this process has 3.44 GiB memory in use. Of the allocated memory 1.74 GiB is allocated by PyTorch, and 179.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank15]: Traceback (most recent call last): [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank15]: pretrain( [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank15]: iteration, num_floating_point_operations_so_far = train( [rank15]: ^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank15]: ) = train_step( [rank15]: ^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank15]: losses_reduced = forward_backward_func( [rank15]: ^^^^^^^^^^^^^^^^^^^^^^ [rank22]: Traceback (most recent call last): [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank22]: pretrain( [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank22]: iteration, num_floating_point_operations_so_far = train( [rank22]: ^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank22]: ) = train_step( [rank22]: ^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank22]: losses_reduced = forward_backward_func( [rank22]: ^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank15]: output_tensor, num_tokens = forward_step( [rank15]: ^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank22]: output_tensor, num_tokens = forward_step( [rank22]: ^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank22]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank22]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank22]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank22]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank15]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank15]: batch = next(global_batches) [rank15]: ^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank15]: attention_mask = torch.ones( [rank15]: ^^^^^^^^^^^ [rank22]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank22]: batch = next(global_batches) [rank22]: ^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank22]: attention_mask = torch.ones( [rank22]: ^^^^^^^^^^^ [rank22]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 18432.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 136.37 GiB is free. Including non-PyTorch memory, this process has 3.44 GiB memory in use. Of the allocated memory 1.74 GiB is allocated by PyTorch, and 179.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank15]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 18432.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 136.37 GiB is free. Including non-PyTorch memory, this process has 3.44 GiB memory in use. Of the allocated memory 1.74 GiB is allocated by PyTorch, and 179.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank14]: Traceback (most recent call last): [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank14]: pretrain( [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank14]: iteration, num_floating_point_operations_so_far = train( [rank19]: Traceback (most recent call last): [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank19]: pretrain( [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank19]: iteration, num_floating_point_operations_so_far = train( [rank19]: ^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank19]: ) = train_step( [rank19]: ^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank19]: losses_reduced = forward_backward_func( [rank19]: ^^^^^^^^^^^^^^^^^^^^^^ [rank14]: ^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank14]: ) = train_step( [rank14]: ^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank14]: losses_reduced = forward_backward_func( [rank14]: ^^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank14]: output_tensor, num_tokens = forward_step( [rank14]: ^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank14]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank19]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank19]: output_tensor, num_tokens = forward_step( [rank19]: ^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank19]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank19]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank14]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank14]: batch = next(global_batches) [rank14]: ^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank14]: attention_mask = torch.ones( [rank14]: ^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank19]: batch = next(global_batches) [rank19]: ^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank19]: attention_mask = torch.ones( [rank19]: ^^^^^^^^^^^ [rank19]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 18432.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 136.35 GiB is free. Including non-PyTorch memory, this process has 3.45 GiB memory in use. Of the allocated memory 1.74 GiB is allocated by PyTorch, and 179.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank14]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 18432.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 136.35 GiB is free. Including non-PyTorch memory, this process has 3.45 GiB memory in use. Of the allocated memory 1.74 GiB is allocated by PyTorch, and 179.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank20]: Traceback (most recent call last): [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank20]: pretrain( [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank20]: iteration, num_floating_point_operations_so_far = train( [rank20]: ^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank20]: ) = train_step( [rank20]: ^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank20]: losses_reduced = forward_backward_func( [rank20]: ^^^^^^^^^^^^^^^^^^^^^^ [rank8]: Traceback (most recent call last): [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank8]: pretrain( [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank8]: iteration, num_floating_point_operations_so_far = train( [rank8]: ^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank8]: ) = train_step( [rank8]: ^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank8]: losses_reduced = forward_backward_func( [rank8]: ^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank20]: output_tensor, num_tokens = forward_step( [rank20]: ^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank20]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank20]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: output_tensor, num_tokens = forward_step( [rank8]: ^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank8]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank8]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank8]: batch = next(global_batches) [rank8]: ^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank20]: batch = next(global_batches) [rank20]: ^^^^^^^^^^^^^^^^^^^^ [rank20]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank20]: attention_mask = torch.ones( [rank20]: ^^^^^^^^^^^ [rank20]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 18432.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 136.37 GiB is free. Including non-PyTorch memory, this process has 3.44 GiB memory in use. Of the allocated memory 1.74 GiB is allocated by PyTorch, and 179.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank8]: attention_mask = torch.ones( [rank8]: ^^^^^^^^^^^ [rank8]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 18432.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 136.35 GiB is free. Including non-PyTorch memory, this process has 3.45 GiB memory in use. Of the allocated memory 1.74 GiB is allocated by PyTorch, and 179.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank13]: Traceback (most recent call last): [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank13]: pretrain( [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank13]: iteration, num_floating_point_operations_so_far = train( [rank13]: ^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank13]: ) = train_step( [rank13]: ^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank13]: losses_reduced = forward_backward_func( [rank13]: ^^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank13]: output_tensor, num_tokens = forward_step( [rank13]: ^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank13]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank13]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank13]: batch = next(global_batches) [rank13]: ^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank13]: attention_mask = torch.ones( [rank13]: ^^^^^^^^^^^ [rank13]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 18432.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 136.37 GiB is free. 
Including non-PyTorch memory, this process has 3.44 GiB memory in use. Of the allocated memory 1.74 GiB is allocated by PyTorch, and 179.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank12]: Traceback (most recent call last): [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank12]: pretrain( [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank12]: iteration, num_floating_point_operations_so_far = train( [rank12]: ^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank12]: ) = train_step( [rank12]: ^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank12]: losses_reduced = forward_backward_func( [rank12]: ^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank12]: output_tensor, num_tokens = forward_step( [rank12]: ^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank12]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank12]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank12]: batch = next(global_batches) [rank12]: ^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank12]: attention_mask = torch.ones( [rank12]: ^^^^^^^^^^^ [rank12]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 18432.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 136.35 GiB is free. Including non-PyTorch memory, this process has 3.45 GiB memory in use. Of the allocated memory 1.74 GiB is allocated by PyTorch, and 179.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
[... the same traceback (pretrain -> train -> train_step -> forward_backward_no_pipelining -> forward_step -> get_batch -> setup_batches -> attention_mask = torch.ones(...)) and the same torch.OutOfMemoryError ("Tried to allocate 18432.00 GiB"; each GPU 0-7 reports 139.81 GiB total capacity, ~136.4 GiB free, ~3.4 GiB in use by the process) are repeated for ranks 9, 10, 24, 25, 26, 27, 28, 29, 30, and 31 ...]
[... ranks 1-7, 9-15, 17-23, 25, and 27-31 then each log: "[W621 21:14:00-01 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())" ...]
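Note: the destroy_process_group() warning is a side effect of the ranks dying mid-step rather than an independent bug. A minimal sketch of the cleanup it asks for, with hypothetical placement (this is not code from pretrain_gpt_profile.py):

    import torch.distributed as dist

    def main():
        dist.init_process_group(backend="nccl")  # RANK/WORLD_SIZE/MASTER_ADDR come from the launcher
        try:
            ...  # training loop
        finally:
            # Explicit teardown silences the warning and releases NCCL resources
            # even when a rank exits on an exception such as the OOM above.
            if dist.is_initialized():
                dist.destroy_process_group()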
W0621 21:14:01.441000 134641 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 134714 closing signal SIGTERM
[... agent 134641 likewise sends SIGTERM to processes 134715, 134716, and 134718-134721 ...]
[rank26]:[W621 21:14:01.908658576 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
W0621 21:14:01.549000 3423466 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3423536 closing signal SIGTERM
[... agent 3423466 likewise sends SIGTERM to processes 3423537-3423541 and 3423543 ...]
E0621 21:14:01.823000 134641 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 3 (pid: 134717) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
Traceback (most recent call last):
  File "<frozen runpy>", line 198, in _run_module_as_main
  File "<frozen runpy>", line 88, in _run_code
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in <module>
    main()
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
    return arg(*args, **kwargs)
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main
    launch(args)
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch
    run(args)
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run
    elastic_launch(
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__ return launch_agent(self._config, self._entrypoint, list(args)) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent raise ChildFailedError( torch.distributed.elastic.multiprocessing.errors.ChildFailedError: ============================================================ ./pretrain_gpt_profile.py FAILED ------------------------------------------------------------ Failures: ------------------------------------------------------------ Root Cause (first observed failure): [0]: time : 2025-06-21_21:14:01 host : fs-mbz-gpu-852 rank : 3 (local_rank: 3) exitcode : 1 (pid: 134717) error_file: traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html ============================================================ E0621 21:14:02.031000 3423466 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 6 (pid: 3423542) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 [W621 21:14:02.960087538 TCPStore.cpp:115] [c10d] recvVector failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-870]:52110, remote=[fs-mbz-gpu-852]:29500): failed to recv, got 0 bytes Exception raised from recvBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:678 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x1537abf785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x153794e5aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa0d0 (0x153794e5c0d0 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5baa81d (0x153794e5c81d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: + 0x5bab4a9 (0x153794e5d4a9 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: c10d::TCPStore::compareSet(std::__cxx11::basic_string, std::allocator > const&, std::vector > const&, std::vector > const&) + 0x1fb (0x153794e574cb in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #6: + 0xc0f919 (0x1537a418b919 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #7: + 0x37f17d (0x1537a38fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #25: + 0x29d90 (0x1537acfc9d90 in /lib/x86_64-linux-gnu/libc.so.6) frame #26: __libc_start_main + 0x80 (0x1537acfc9e40 in /lib/x86_64-linux-gnu/libc.so.6) W0621 21:14:02.053000 3423466 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-870_3423466_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. 
[rank26]:[W621 21:14:02.490464724 TCPStore.cpp:125] [c10d] recvValue failed on SocketImpl(fd=95, addr=[fs-mbz-gpu-901]:42806, remote=[fs-mbz-gpu-852]:42591): failed to recv, got 0 bytes
Exception raised from recvBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:678 (most recent call first):
[... C++ frame stack: c10::Error::Error (libc10.so) -> libtorch_cpu.so -> c10d::TCPStore::check -> c10d::ProcessGroupNCCL::heartbeatMonitor (libtorch_cuda.so) -> libstdc++.so.6 -> libc.so.6 ...]
[W621 21:14:02.972264417 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-870]:52110, remote=[fs-mbz-gpu-852]:29500): Broken pipe
Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first):
[... C++ frame stack: c10::Error::Error (libc10.so) -> libtorch_cpu.so -> c10d::TCPStore::compareSet (libtorch_cpu.so) -> libtorch_python.so -> __libc_start_main (libc.so.6) ...]
[rank26]:[W621 21:14:02.494794217 ProcessGroupNCCL.cpp:1659] [PG ID 0 PG GUID 0(default_pg) Rank 26] Failed to check the "should dump" flag on TCPStore, (maybe TCPStore server has shut down too early), with error: failed to recv, got 0 bytes
W0621 21:14:02.063000 3423466 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-870_3423466_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError.
[W621 21:14:02.981119308 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-870]:52110, remote=[fs-mbz-gpu-852]:29500): Broken pipe
Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first):
[... same compareSet frame stack as above ...]
W0621 21:14:02.068000 2054085 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2054154 closing signal SIGTERM
[... agent 2054085 likewise sends SIGTERM to processes 2054155, 2054156, 2054157, 2054159, 2054160, and 2054161 ...]
W0621 21:14:02.072000 3423466 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-870_3423466_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError.
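Note: the TCPStore and rendezvous errors in this tail of the log are secondary fallout from the OOM, not a new failure. The rendezvous TCPStore is served by the agent on fs-mbz-gpu-852 (port 29500, per the remote= addresses above); once that agent exits after its local ranks hit the OOM, the other nodes' store connections break, which is what the recvVector/sendBytes failures and RendezvousConnectionError messages report. A small, assumption-laden probe one could run from a surviving node to confirm the store is gone (host and port taken from this log; the key name is made up):

    from datetime import timedelta
    import torch.distributed as dist

    try:
        # Connecting as a client times out or errors if the server side has shut down.
        store = dist.TCPStore("fs-mbz-gpu-852", 29500, is_master=False,
                              timeout=timedelta(seconds=5))
        store.set("probe_key", "ok")
        print("rendezvous store reachable:", store.get("probe_key"))
    except Exception as exc:
        print("rendezvous store unreachable:", exc)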
Traceback (most recent call last):
  File "<frozen runpy>", line 198, in _run_module_as_main
  File "<frozen runpy>", line 88, in _run_code
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in <module>
    main()
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
    return arg(*args, **kwargs)
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main
    launch(args)
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch
    run(args)
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run
    elastic_launch(
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__
    return launch_agent(self._config, self._entrypoint, list(args))
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent
    raise ChildFailedError(
torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
============================================================
./pretrain_gpt_profile.py FAILED
------------------------------------------------------------
Failures:
  <NO_OTHER_FAILURES>
------------------------------------------------------------
Root Cause (first observed failure):
[0]:
  time      : 2025-06-21_21:14:01
  host      : fs-mbz-gpu-870
  rank      : 14 (local_rank: 6)
  exitcode  : 1 (pid: 3423542)
  error_file: <N/A>
  traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
============================================================
+ set +x
W0621 21:14:02.148000 3354776 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3354846 closing signal SIGTERM
[... agent 3354776 likewise sends SIGTERM to processes 3354847 and 3354849-3354853 ...]
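Note: the empty error_file and the "To enable traceback" hint in the failure summaries above refer to torchelastic's error recording. A minimal sketch of how an entry point is usually instrumented for it (hypothetical placement, not code from this repository):

    from torch.distributed.elastic.multiprocessing.errors import record

    @record  # records the child's exception and traceback into the error file the launcher reports
    def main():
        ...  # call the real training entry point here

    if __name__ == "__main__":
        main()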
+ set +x
[W621 21:14:02.753473836 TCPStore.cpp:115] [c10d] recvVector failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-881]:42294, remote=[fs-mbz-gpu-852]:29500): failed to recv, got 0 bytes
Exception raised from recvBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:678 (most recent call first):
[... C++ frame stack: c10::Error::Error (libc10.so) -> libtorch_cpu.so -> c10d::TCPStore::compareSet (libtorch_cpu.so) -> libtorch_python.so -> libc.so.6 ...]
W0621 21:14:02.389000 3354776 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1341] The node 'fs-mbz-gpu-881_3354776_0' has failed to send a keep-alive heartbeat to the rendezvous '343200' due to an error of type RendezvousConnectionError.
E0621 21:14:02.502000 2054085 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 4 (pid: 2054158) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
E0621 21:14:02.508000 3354776 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 2 (pid: 3354848) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
[W621 21:14:02.878830764 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-881]:42294, remote=[fs-mbz-gpu-852]:29500): Broken pipe
Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first):
[... C++ frame stack: c10::Error::Error (libc10.so) -> libtorch_cpu.so -> c10d::TCPStore::compareSet (libtorch_cpu.so) -> libtorch_python.so -> __libc_start_main (libc.so.6) ...]
W0621 21:14:02.520000 2054085 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-901_2054085_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError.
W0621 21:14:02.520000 3354776 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-881_3354776_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError.
[W621 21:14:02.892397975 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-881]:42294, remote=[fs-mbz-gpu-852]:29500): Broken pipe
Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first):
frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >) + 0x98 (0x155292b785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
[frames #1-#3: anonymous frames in libtorch_cpu.so]
frame #4: c10d::TCPStore::compareSet(...) + 0x299 (0x15527ba57569 in libtorch_cpu.so)
[frames #5-#6: anonymous frames in libtorch_python.so]
frame #24: <anonymous> + 0x29d90 (0x155293b7fd90 in /lib/x86_64-linux-gnu/libc.so.6)
frame #25: __libc_start_main + 0x80 (0x155293b7fe40 in /lib/x86_64-linux-gnu/libc.so.6)

[W621 21:14:02.960153525 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-901]:51438, remote=[fs-mbz-gpu-852]:29500): Broken pipe
Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first):
frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >) + 0x98 (0x14ae187785e8 in libc10.so)
[frames #1-#3: anonymous frames in libtorch_cpu.so]
frame #4: c10d::TCPStore::doWait(...) + 0x1a6 (0x14ae01657ac6 in libtorch_cpu.so)
frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) + 0x33 (0x14ae01657ea3 in libtorch_cpu.so)
frame #6: c10d::TCPStore::get(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) + 0xab (0x14ae01658f8b in libtorch_cpu.so)
[frames #7-#8: anonymous frames in libtorch_python.so; frames #26-#27: libc start frames]

W0621 21:14:02.531000 3354776 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-881_3354776_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError.
W0621 21:14:02.532000 2054085 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-901_2054085_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError.
[The same pair of sendBytes "Broken pipe" traces (timestamps 21:14:02.902226737 and 21:14:02.971295198) and the same pair of rendezvous-shutdown warnings (W0621 21:14:02.541000 and 21:14:02.542000) are logged a second time as both node agents retry against the TCPStore at fs-mbz-gpu-852:29500.]
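The "Broken pipe" traces above are the worker-side view of the c10d rendezvous store: both agents are still issuing TCPStore requests (compareSet on one node, get on the other) against fs-mbz-gpu-852:29500 after the process hosting the store has exited, so the subsequent rendezvous shutdown also fails with RendezvousConnectionError. Below is a minimal sketch of that client/store relationship using the public torch.distributed.TCPStore API; the host and port are taken from the log, while the key name and timeout are made up for illustration.

# Minimal TCPStore client sketch (illustrative, not from the job script).
# While the store hosted on the rendezvous endpoint is alive, set()/get()
# succeed; once the hosting process goes away, the same calls fail with
# connection errors like the sendBytes "Broken pipe" traces above.
from datetime import timedelta

import torch.distributed as dist

store = dist.TCPStore(
    host_name="fs-mbz-gpu-852",   # --rdzv_endpoint host from the launch command
    port=29500,                   # --rdzv_endpoint port
    is_master=False,              # this process only connects, it does not host
    timeout=timedelta(seconds=30),
)

store.set("heartbeat/worker", "alive")   # works while the store is reachable
print(store.get("heartbeat/worker"))     # raises once the endpoint is gone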
Traceback (most recent call last):
  File "<frozen runpy>", line 198, in _run_module_as_main
  File "<frozen runpy>", line 88, in _run_code
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in <module>
    main()
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
    return arg(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main
    launch(args)
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch
    run(args)
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run
    elastic_launch(
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__
    return launch_agent(self._config, self._entrypoint, list(args))
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent
    raise ChildFailedError(
torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
============================================================
./pretrain_gpt_profile.py FAILED
------------------------------------------------------------
Failures:
  <NO_OTHER_FAILURES>
------------------------------------------------------------
Root Cause (first observed failure):
[0]:
  time      : 2025-06-21_21:14:02
  host      : fs-mbz-gpu-901
  rank      : 28 (local_rank: 4)
  exitcode  : 1 (pid: 2054158)
  error_file: <N/A>
  traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
============================================================
[The agent on fs-mbz-gpu-881 prints the same traceback and an analogous summary; its root cause is rank 18 (local_rank: 2), exitcode 1 (pid: 3354848), at the same timestamp. The two agents' output is interleaved in the raw log.]
+ set +x
+ set +x
+ for ctx_length in 1024 2048 4096 8192 12288 16384 24576 32768 40960 49152 65536 81920 98304 131072
+ export PROF_CTX_LENGTH=32768
+ PROF_CTX_LENGTH=32768
+ name='/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L32768*tp8.cp4.bs32.json'
+ '[' -f '/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L32768*tp8.cp4.bs32.json' ']'
+ echo 'Running ctx_length=32768, TP_SIZE=8, CP_SIZE=4, BATCH_SIZE=32'
+ srun bash ./attnserver.sh
+ which python3
+ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 3 --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 32768 --max-position-embeddings 32768 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
[The agents on the other three nodes launch the identical command with --node_rank 2, --node_rank 1, and --node_rank 0 respectively, each preceded by its own `+ which python3` trace line.]
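For reference, the launch above spans 4 nodes with 8 processes each, 32 ranks in total, and the flags fix tensor parallelism at 8 and context parallelism at 4, which leaves a data-parallel size of 1. The small sanity check below walks through that arithmetic; only the numeric values come from the command line, while the variable names and the pipeline-parallel default of 1 are assumptions.

# Sanity check of the parallel layout implied by the launch flags above.
# Only the numbers are taken from the log; the names are illustrative.
nnodes, nproc_per_node = 4, 8
tensor_parallel = 8          # --tensor-model-parallel-size
context_parallel = 4         # --context-parallel-size
pipeline_parallel = 1        # not set on the command line; Megatron's default

world_size = nnodes * nproc_per_node                              # 32 ranks
model_parallel = tensor_parallel * context_parallel * pipeline_parallel
data_parallel = world_size // model_parallel                      # 32 // 32 = 1

assert world_size % model_parallel == 0
print(f"world={world_size} tp={tensor_parallel} cp={context_parallel} dp={data_parallel}")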
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated
and will be removed in future. Use torchrun.
Note that --use-env is set by default in torchrun.
If your script expects `--local-rank` argument to be set, please
change it to read from `os.environ['LOCAL_RANK']` instead. See
https://pytorch.org/docs/stable/distributed.html#launch-utility for
further instructions
  main()
W0621 21:14:06.495000 2055898 site-packages/torch/distributed/run.py:766]
W0621 21:14:06.495000 2055898 site-packages/torch/distributed/run.py:766] *****************************************
W0621 21:14:06.495000 2055898 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
W0621 21:14:06.495000 2055898 site-packages/torch/distributed/run.py:766] *****************************************
[The agents with pids 3356607, 3425279, and 136526 print the same FutureWarning and the same OMP_NUM_THREADS banner at 21:14:06.565, 21:14:06.588, and 21:14:06.760.]
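The FutureWarning above concerns only the launcher: torchrun stops passing --local-rank on the command line, so the training script is expected to read its local rank from the environment. A hedged sketch of that pattern follows; the argparse fallback is illustrative and not taken from pretrain_gpt_profile.py.

# How a launcher-agnostic script typically picks up its local rank:
# torchrun (and torch.distributed.launch with --use-env) export LOCAL_RANK,
# while the legacy launcher passes --local-rank on the command line.
import argparse
import os

parser = argparse.ArgumentParser()
parser.add_argument("--local-rank", "--local_rank", type=int, default=None,
                    help="only used by the legacy torch.distributed.launch")
args, _ = parser.parse_known_args()

# Prefer the environment variable, as the deprecation notice suggests.
local_rank = int(os.environ.get("LOCAL_RANK", args.local_rank or 0))
print(f"local_rank={local_rank}")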
[rank8]:[W621 21:14:30.181969532 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 8] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can specify device_id in init_process_group() to force use of a particular device.
[The same ProcessGroupNCCL warning is printed once for every one of the 32 ranks (ranks 0-31), each naming its local GPU (0-7), with timestamps clustered around 21:14:30.]
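The warning's own remedy is to bind each rank to its GPU when the process group is created, so the rank-to-GPU mapping is known up front. Here is a minimal sketch, assuming one process per GPU, LOCAL_RANK exported by the launcher, and the usual env:// variables (MASTER_ADDR and friends) already set.

# Bind each rank to a specific CUDA device at init time, as the NCCL
# warning above suggests, so c10d does not have to guess "GPU 0".
import os

import torch
import torch.distributed as dist

local_rank = int(os.environ["LOCAL_RANK"])
device = torch.device(f"cuda:{local_rank}")
torch.cuda.set_device(device)

# device_id is accepted by recent PyTorch releases; it tells the process
# group which GPU this rank will use for NCCL communication.
dist.init_process_group(backend="nccl", device_id=device)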
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
  warnings.warn(
[The same UserWarning is emitted repeatedly, once from each worker process as it builds the layer spec.]
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
  warnings.warn(
[The same DeprecationWarning is likewise emitted once per worker process.]
[rank2]: Traceback (most recent call last):
[rank2]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
[rank2]:     pretrain(
[rank2]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain
[rank2]:     iteration, num_floating_point_operations_so_far = train(
[rank2]:                                                       ^^^^^^
[rank2]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train
[rank2]:     ) = train_step(
[rank2]:         ^^^^^^^^^^^
[rank2]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step
[rank2]:     losses_reduced = forward_backward_func(
[rank2]:                      ^^^^^^^^^^^^^^^^^^^^^^
[rank2]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining
[rank2]:     output_tensor, num_tokens = forward_step(
[rank2]:                                 ^^^^^^^^^^^^^
[rank2]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step
[rank2]:     output_tensor, loss_func = forward_step_func(data_iterator, model)
[rank2]:                                ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
[rank2]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step
[rank2]:     (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)
[rank2]:                                                                             ^^^^^^^^^^^^^^^^^^^^^^^^
[rank2]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch
[rank2]:     batch = next(global_batches)
[rank2]:             ^^^^^^^^^^^^^^^^^^^^
[rank2]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches
[rank2]:     attention_mask = torch.ones(
[rank2]:                      ^^^^^^^^^^^
[rank2]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 32768.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 136.08 GiB is free. Including non-PyTorch memory, this process has 3.73 GiB memory in use. Of the allocated memory 2.15 GiB is allocated by PyTorch, and 83.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
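This rank, and as the entries below show every other failing rank, dies on the same allocation: a dense attention mask built with torch.ones at seq-length 32768, for which the allocator reports a 32768.00 GiB request. The exact tensor shape in setup_batches is not visible in the log, so the shape and dtype in the sketch below are assumptions; the arithmetic only illustrates why anything beyond a single [seq, seq] slice cannot fit at this length.

# Back-of-the-envelope memory math for a dense attention mask at
# seq_length = 32768. Only the sequence length and the 32768.00 GiB
# figure come from the log; the shape/dtype here are assumed.
seq_len = 32768
bytes_per_elem = 1                                   # torch.bool

one_slice = seq_len * seq_len * bytes_per_elem       # one [seq, seq] mask
print(f"[seq, seq] bool slice: {one_slice / 2**30:.0f} GiB")                # 1 GiB

reported_request = 32768 * 2**30                     # 32768.00 GiB from the log
print(f"slices in the reported request: {reported_request // one_slice}")   # 32768

# 32768 slices of [seq, seq] is consistent with an extra seq-sized leading
# dimension, e.g. torch.ones(seq_len, 1, seq_len, seq_len, dtype=torch.bool),
# rather than the usual per-micro-batch mask of shape (1, 1, seq, seq),
# which at roughly 1 GiB would fit comfortably on a 139.81 GiB device.

Note that PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True, suggested in the allocator message, addresses fragmentation of already-reserved memory; it would not, by itself, make a request hundreds of times larger than the device fit.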
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank5]: Traceback (most recent call last): [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank5]: pretrain( [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank5]: iteration, num_floating_point_operations_so_far = train( [rank5]: ^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank5]: ) = train_step( [rank5]: ^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank5]: losses_reduced = forward_backward_func( [rank5]: ^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank5]: output_tensor, num_tokens = forward_step( [rank5]: ^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank5]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank5]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank5]: batch = next(global_batches) [rank5]: ^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank5]: attention_mask = torch.ones( [rank5]: ^^^^^^^^^^^ [rank5]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 32768.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 136.06 GiB is free. Including non-PyTorch memory, this process has 3.74 GiB memory in use. Of the allocated memory 2.15 GiB is allocated by PyTorch, and 83.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank1]: Traceback (most recent call last): [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank1]: pretrain( [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank1]: iteration, num_floating_point_operations_so_far = train( [rank1]: ^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank1]: ) = train_step( [rank1]: ^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank1]: losses_reduced = forward_backward_func( [rank1]: ^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank1]: output_tensor, num_tokens = forward_step( [rank1]: ^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank1]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank1]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank1]: batch = next(global_batches) [rank1]: ^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank1]: attention_mask = torch.ones( [rank1]: ^^^^^^^^^^^ [rank1]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 32768.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 136.06 GiB is free. Including non-PyTorch memory, this process has 3.74 GiB memory in use. Of the allocated memory 2.15 GiB is allocated by PyTorch, and 83.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank3]: Traceback (most recent call last): [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank3]: pretrain( [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank3]: iteration, num_floating_point_operations_so_far = train( [rank3]: ^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank3]: ) = train_step( [rank3]: ^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank3]: losses_reduced = forward_backward_func( [rank3]: ^^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank3]: output_tensor, num_tokens = forward_step( [rank3]: ^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank3]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank3]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank3]: batch = next(global_batches) [rank3]: ^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank3]: attention_mask = torch.ones( [rank3]: ^^^^^^^^^^^ [rank3]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 32768.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 136.06 GiB is free. Including non-PyTorch memory, this process has 3.74 GiB memory in use. Of the allocated memory 2.15 GiB is allocated by PyTorch, and 83.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank7]: Traceback (most recent call last): [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank7]: pretrain( [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank7]: iteration, num_floating_point_operations_so_far = train( [rank7]: ^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank7]: ) = train_step( [rank7]: ^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank7]: losses_reduced = forward_backward_func( [rank7]: ^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank7]: output_tensor, num_tokens = forward_step( [rank7]: ^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank7]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank7]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank7]: batch = next(global_batches) [rank7]: ^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank7]: attention_mask = torch.ones( [rank7]: ^^^^^^^^^^^ [rank7]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 32768.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 136.06 GiB is free. Including non-PyTorch memory, this process has 3.74 GiB memory in use. Of the allocated memory 2.15 GiB is allocated by PyTorch, and 83.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank6]: Traceback (most recent call last): [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank6]: pretrain( [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank6]: iteration, num_floating_point_operations_so_far = train( [rank6]: ^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank6]: ) = train_step( [rank6]: ^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank6]: losses_reduced = forward_backward_func( [rank6]: ^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank6]: output_tensor, num_tokens = forward_step( [rank6]: ^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank6]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank6]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank6]: batch = next(global_batches) [rank6]: ^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank6]: attention_mask = torch.ones( [rank6]: ^^^^^^^^^^^ [rank6]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 32768.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 136.08 GiB is free. Including non-PyTorch memory, this process has 3.73 GiB memory in use. Of the allocated memory 2.15 GiB is allocated by PyTorch, and 83.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
[The same torch.OutOfMemoryError traceback ("CUDA out of memory. Tried to allocate 32768.00 GiB", raised from the attention_mask = torch.ones( call at pretrain_gpt_profile.py line 226 through the identical call chain) was also printed by ranks 0, 4, and 8-31. Those reports differ only in the local GPU index (0-7), the free-memory figure (136.04-136.08 GiB), and the per-process memory in use (3.73-3.76 GiB); each likewise ends with the pointer to the Memory Management documentation (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables).]
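[Note: a 32768.00 GiB request is what a densely materialized pairwise attention mask costs at multi-million-token context lengths. A minimal back-of-the-envelope sketch, assuming setup_batches builds the mask as a dense [micro_batch, 1, seq_len, seq_len] tensor with torch.ones and the default float32 dtype (the actual shape, dtype, batch size, and sequence length used by pretrain_gpt_profile.py are not visible in this log):

def dense_mask_bytes(micro_batch: int, seq_len: int, bytes_per_elem: int = 4) -> int:
    # Bytes needed for a dense [micro_batch, 1, seq_len, seq_len] mask.
    # bytes_per_elem=4 assumes torch.ones' default float32; a bool mask would use 1.
    return micro_batch * 1 * seq_len * seq_len * bytes_per_elem

GiB = 1024 ** 3
# Assumed illustrative numbers: 2 sequences of 2M tokens in float32 already need
# 32768 GiB, matching the allocation request reported by every rank above.
print(dense_mask_bytes(2, 2 * 1024 * 1024) / GiB)      # 32768.0
# A bool mask is 4x smaller but still dwarfs the 139.81 GiB per GPU:
print(dense_mask_bytes(2, 2 * 1024 * 1024, 1) / GiB)   # 8192.0

Under that assumption, the PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True hint in the error text only mitigates fragmentation and cannot help with a request roughly 234x the 139.81 GiB device capacity; the relevant change would be to avoid materializing the full mask at all (for example, relying on a fused causal-attention kernel or building block-local masks).]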
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank24]: Traceback (most recent call last): [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank24]: pretrain( [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank24]: iteration, num_floating_point_operations_so_far = train( [rank24]: ^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank24]: ) = train_step( [rank24]: ^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank24]: losses_reduced = forward_backward_func( [rank24]: ^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank24]: output_tensor, num_tokens = forward_step( [rank24]: ^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank24]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank24]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank24]: batch = next(global_batches) [rank24]: ^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank24]: attention_mask = torch.ones( [rank24]: ^^^^^^^^^^^ [rank24]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 32768.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 136.06 GiB is free. Including non-PyTorch memory, this process has 3.74 GiB memory in use. Of the allocated memory 2.15 GiB is allocated by PyTorch, and 83.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank2]:[W621 21:14:43.281464489 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank6]:[W621 21:14:43.306317665 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank4]:[W621 21:14:43.335317570 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank1]:[W621 21:14:43.391468802 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. 
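The three tracebacks above (ranks 24, 25 and 28) all fail in setup_batches() of pretrain_gpt_profile.py, where torch.ones() materializes a dense attention mask; a 32768.00 GiB request cannot be cured by the suggested PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True, which only mitigates fragmentation. A minimal sketch of why such a mask explodes with context length, assuming a hypothetical (batch, 1, seq, seq) bool mask rather than the script's actual shape:

    import torch

    def dense_mask_gib(batch: int, seq_len: int, dtype=torch.bool) -> float:
        # Assumed shape (batch, 1, seq_len, seq_len): memory grows with seq_len**2,
        # so doubling the context length quadruples the mask regardless of model size.
        elem_size = torch.empty((), dtype=dtype).element_size()
        return batch * seq_len * seq_len * elem_size / 2**30

    print(f"{dense_mask_gib(batch=32, seq_len=32768):.0f} GiB")  # ~32 GiB for this toy shape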
For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank7]:[W621 21:14:43.400791665 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank3]:[W621 21:14:43.413461880 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank5]:[W621 21:14:43.421936507 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank19]:[W621 21:14:43.900666068 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank22]:[W621 21:14:43.952782223 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank9]:[W621 21:14:43.560890531 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank12]:[W621 21:14:43.590706192 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank15]:[W621 21:14:43.597556385 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank10]:[W621 21:14:43.685237229 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank13]:[W621 21:14:43.781203754 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank11]:[W621 21:14:43.790495850 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank14]:[W621 21:14:43.791993707 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank17]:[W621 21:14:43.326592218 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. 
For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank18]:[W621 21:14:43.370478442 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank20]:[W621 21:14:44.375904974 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank21]:[W621 21:14:44.385467355 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank26]:[W621 21:14:44.459616445 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank29]:[W621 21:14:44.474500196 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank23]:[W621 21:14:44.423551858 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank25]:[W621 21:14:44.496148702 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank28]:[W621 21:14:44.525377018 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank30]:[W621 21:14:44.555219388 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank31]:[W621 21:14:44.562691253 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank27]:[W621 21:14:44.835426888 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. 
For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) W0621 21:14:44.488000 136526 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 136598 closing signal SIGTERM W0621 21:14:44.490000 136526 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 136599 closing signal SIGTERM W0621 21:14:44.491000 136526 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 136601 closing signal SIGTERM W0621 21:14:44.491000 136526 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 136602 closing signal SIGTERM W0621 21:14:44.492000 136526 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 136603 closing signal SIGTERM W0621 21:14:44.492000 136526 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 136604 closing signal SIGTERM W0621 21:14:44.492000 136526 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 136605 closing signal SIGTERM W0621 21:14:44.626000 3425279 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3425350 closing signal SIGTERM W0621 21:14:44.629000 3425279 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3425352 closing signal SIGTERM W0621 21:14:44.629000 3425279 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3425353 closing signal SIGTERM W0621 21:14:44.630000 3425279 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3425354 closing signal SIGTERM W0621 21:14:44.630000 3425279 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3425355 closing signal SIGTERM W0621 21:14:44.630000 3425279 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3425356 closing signal SIGTERM W0621 21:14:44.631000 3425279 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3425357 closing signal SIGTERM W0621 21:14:44.784000 3356607 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3356679 closing signal SIGTERM W0621 21:14:44.785000 3356607 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3356680 closing signal SIGTERM W0621 21:14:44.787000 3356607 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3356681 closing signal SIGTERM W0621 21:14:44.787000 3356607 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3356682 closing signal SIGTERM W0621 21:14:44.788000 3356607 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3356683 closing signal SIGTERM W0621 21:14:44.788000 3356607 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3356684 closing signal SIGTERM W0621 21:14:44.789000 3356607 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3356686 closing signal SIGTERM E0621 21:14:44.920000 136526 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 2 (pid: 136600) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 Traceback (most recent call last): File "", line 198, in _run_module_as_main File "", line 88, in _run_code File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in main() File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper return arg(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main launch(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch run(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run elastic_launch( File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__ return launch_agent(self._config, self._entrypoint, list(args)) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent raise ChildFailedError( torch.distributed.elastic.multiprocessing.errors.ChildFailedError: ============================================================ ./pretrain_gpt_profile.py FAILED ------------------------------------------------------------ Failures: ------------------------------------------------------------ Root Cause (first observed failure): [0]: time : 2025-06-21_21:14:44 host : fs-mbz-gpu-852 rank : 2 (local_rank: 2) exitcode : 1 (pid: 136600) error_file: traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html ============================================================ E0621 21:14:45.085000 3425279 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 1 (pid: 3425351) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 [W621 21:14:45.013837523 TCPStore.cpp:115] [c10d] recvVector failed on SocketImpl(fd=4, addr=[fs-mbz-gpu-870]:42608, remote=[fs-mbz-gpu-852]:29500): failed to recv, got 0 bytes Exception raised from recvBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:678 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x14b3ecb785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x14b3d5a5aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa0d0 (0x14b3d5a5c0d0 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5baa81d (0x14b3d5a5c81d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: + 0x5bab4a9 (0x14b3d5a5d4a9 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: c10d::TCPStore::compareSet(std::__cxx11::basic_string, std::allocator > const&, std::vector > const&, std::vector > const&) + 0x1fb (0x14b3d5a574cb in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame 
#6: + 0xc0f919 (0x14b3e4d8b919 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #7: + 0x37f17d (0x14b3e44fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #25: + 0x29d90 (0x14b3edbecd90 in /lib/x86_64-linux-gnu/libc.so.6) frame #26: __libc_start_main + 0x80 (0x14b3edbece40 in /lib/x86_64-linux-gnu/libc.so.6) W0621 21:14:45.107000 3425279 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-870_3425279_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. [W621 21:14:45.025756450 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=4, addr=[fs-mbz-gpu-870]:42608, remote=[fs-mbz-gpu-852]:29500): Broken pipe Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x14b3ecb785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x14b3d5a5aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa358 (0x14b3d5a5c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5babb3e (0x14b3d5a5db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: c10d::TCPStore::compareSet(std::__cxx11::basic_string, std::allocator > const&, std::vector > const&, std::vector > const&) + 0x299 (0x14b3d5a57569 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: + 0xc0f919 (0x14b3e4d8b919 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #6: + 0x37f17d (0x14b3e44fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #24: + 0x29d90 (0x14b3edbecd90 in /lib/x86_64-linux-gnu/libc.so.6) frame #25: __libc_start_main + 0x80 (0x14b3edbece40 in /lib/x86_64-linux-gnu/libc.so.6) W0621 21:14:45.117000 3425279 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-870_3425279_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. 
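The repeated ProcessGroupNCCL warnings above ("destroy_process_group() was not called before program exit") are advisory; per the linked shutdown docs, the leak is avoided by tearing the default process group down explicitly, even when training aborts. A minimal sketch, with pretrain_fn standing in for the script's real entry point:

    import torch.distributed as dist

    def run_entrypoint(pretrain_fn):
        # Hypothetical wrapper: destroy_process_group() runs on clean exit and on
        # exceptions alike, which is what the warning above asks for.
        try:
            pretrain_fn()
        finally:
            if dist.is_available() and dist.is_initialized():
                dist.destroy_process_group()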
[W621 21:14:45.035040210 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=4, addr=[fs-mbz-gpu-870]:42608, remote=[fs-mbz-gpu-852]:29500): Broken pipe Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x14b3ecb785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x14b3d5a5aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa358 (0x14b3d5a5c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5babb3e (0x14b3d5a5db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: c10d::TCPStore::compareSet(std::__cxx11::basic_string, std::allocator > const&, std::vector > const&, std::vector > const&) + 0x299 (0x14b3d5a57569 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: + 0xc0f919 (0x14b3e4d8b919 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #6: + 0x37f17d (0x14b3e44fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #24: + 0x29d90 (0x14b3edbecd90 in /lib/x86_64-linux-gnu/libc.so.6) frame #25: __libc_start_main + 0x80 (0x14b3edbece40 in /lib/x86_64-linux-gnu/libc.so.6) W0621 21:14:45.126000 3425279 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-870_3425279_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. 
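Each ChildFailedError summary in this log reports an empty error_file and points to https://pytorch.org/docs/stable/elastic/errors.html. Per that page, decorating the worker entry point with the elastic record decorator lets the agent capture the worker's actual traceback rather than just an exit code. A minimal sketch (main() stands in for the entry point of ./pretrain_gpt_profile.py):

    from torch.distributed.elastic.multiprocessing.errors import record

    @record  # propagate worker exceptions into the elastic agent's error file
    def main():
        ...  # call pretrain(...) here

    if __name__ == "__main__":
        main()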
Traceback (most recent call last): File "", line 198, in _run_module_as_main File "", line 88, in _run_code File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in main() File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper return arg(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main launch(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch run(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run elastic_launch( File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__ return launch_agent(self._config, self._entrypoint, list(args)) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent raise ChildFailedError( torch.distributed.elastic.multiprocessing.errors.ChildFailedError: ============================================================ ./pretrain_gpt_profile.py FAILED ------------------------------------------------------------ Failures: ------------------------------------------------------------ Root Cause (first observed failure): [0]: time : 2025-06-21_21:14:44 host : fs-mbz-gpu-870 rank : 9 (local_rank: 1) exitcode : 1 (pid: 3425351) error_file: traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html ============================================================ [rank24]:[W621 21:14:45.601216323 TCPStore.cpp:125] [c10d] recvValue failed on SocketImpl(fd=75, addr=[fs-mbz-gpu-901]:51556, remote=[fs-mbz-gpu-852]:45473): failed to recv, got 0 bytes Exception raised from recvBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:678 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x1515e5b785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x1515cea5aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baae40 (0x1515cea5ce40 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5bab74a (0x1515cea5d74a in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: c10d::TCPStore::check(std::vector, std::allocator >, std::allocator, std::allocator > > > const&) + 0x2a9 (0x1515cea571a9 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: c10d::ProcessGroupNCCL::heartbeatMonitor() + 0x379 (0x15158bc509a9 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cuda.so) frame #6: + 0xd3b6d (0x1515e56f1b6d in 
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/../lib/libstdc++.so.6) frame #7: + 0x94ac3 (0x1515e6c07ac3 in /lib/x86_64-linux-gnu/libc.so.6) frame #8: + 0x126850 (0x1515e6c99850 in /lib/x86_64-linux-gnu/libc.so.6) [rank24]:[W621 21:14:45.605396619 ProcessGroupNCCL.cpp:1659] [PG ID 0 PG GUID 0(default_pg) Rank 24] Failed to check the "should dump" flag on TCPStore, (maybe TCPStore server has shut down too early), with error: failed to recv, got 0 bytes W0621 21:14:45.192000 2055898 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2055969 closing signal SIGTERM W0621 21:14:45.195000 2055898 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2055970 closing signal SIGTERM W0621 21:14:45.195000 2055898 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2055971 closing signal SIGTERM W0621 21:14:45.196000 2055898 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2055972 closing signal SIGTERM W0621 21:14:45.196000 2055898 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2055973 closing signal SIGTERM W0621 21:14:45.196000 2055898 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2055974 closing signal SIGTERM W0621 21:14:45.197000 2055898 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2055976 closing signal SIGTERM E0621 21:14:45.224000 3356607 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 6 (pid: 3356685) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 [W621 21:14:45.623245860 TCPStore.cpp:115] [c10d] recvVector failed on SocketImpl(fd=4, addr=[fs-mbz-gpu-881]:53362, remote=[fs-mbz-gpu-852]:29500): failed to recv, got 0 bytes Exception raised from recvBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:678 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x14a8c4b785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x14a8ada5aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa0d0 (0x14a8ada5c0d0 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5baa81d (0x14a8ada5c81d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: + 0x5bab4a9 (0x14a8ada5d4a9 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: c10d::TCPStore::compareSet(std::__cxx11::basic_string, std::allocator > const&, std::vector > const&, std::vector > const&) + 0x1fb (0x14a8ada574cb in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #6: + 0xc0f919 (0x14a8bcd8b919 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #7: + 0x37f17d (0x14a8bc4fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #25: + 0x29d90 (0x14a8c5b3dd90 in 
/lib/x86_64-linux-gnu/libc.so.6) frame #26: __libc_start_main + 0x80 (0x14a8c5b3de40 in /lib/x86_64-linux-gnu/libc.so.6) + set +x W0621 21:14:45.278000 3356607 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-881_3356607_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. [W621 21:14:45.649976967 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=4, addr=[fs-mbz-gpu-881]:53362, remote=[fs-mbz-gpu-852]:29500): Broken pipe Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x14a8c4b785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x14a8ada5aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa358 (0x14a8ada5c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5babb3e (0x14a8ada5db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: c10d::TCPStore::compareSet(std::__cxx11::basic_string, std::allocator > const&, std::vector > const&, std::vector > const&) + 0x299 (0x14a8ada57569 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: + 0xc0f919 (0x14a8bcd8b919 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #6: + 0x37f17d (0x14a8bc4fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #24: + 0x29d90 (0x14a8c5b3dd90 in /lib/x86_64-linux-gnu/libc.so.6) frame #25: __libc_start_main + 0x80 (0x14a8c5b3de40 in /lib/x86_64-linux-gnu/libc.so.6) W0621 21:14:45.288000 3356607 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-881_3356607_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. 
[W621 21:14:45.658725111 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=4, addr=[fs-mbz-gpu-881]:53362, remote=[fs-mbz-gpu-852]:29500): Broken pipe Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x14a8c4b785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x14a8ada5aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa358 (0x14a8ada5c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5babb3e (0x14a8ada5db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: c10d::TCPStore::compareSet(std::__cxx11::basic_string, std::allocator > const&, std::vector > const&, std::vector > const&) + 0x299 (0x14a8ada57569 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: + 0xc0f919 (0x14a8bcd8b919 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #6: + 0x37f17d (0x14a8bc4fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #24: + 0x29d90 (0x14a8c5b3dd90 in /lib/x86_64-linux-gnu/libc.so.6) frame #25: __libc_start_main + 0x80 (0x14a8c5b3de40 in /lib/x86_64-linux-gnu/libc.so.6) W0621 21:14:45.306000 3356607 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-881_3356607_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. 
Traceback (most recent call last): File "", line 198, in _run_module_as_main File "", line 88, in _run_code File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in main() File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper return arg(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main launch(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch run(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run elastic_launch( File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__ return launch_agent(self._config, self._entrypoint, list(args)) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent raise ChildFailedError( torch.distributed.elastic.multiprocessing.errors.ChildFailedError: ============================================================ ./pretrain_gpt_profile.py FAILED ------------------------------------------------------------ Failures: ------------------------------------------------------------ Root Cause (first observed failure): [0]: time : 2025-06-21_21:14:44 host : fs-mbz-gpu-881 rank : 22 (local_rank: 6) exitcode : 1 (pid: 3356685) error_file: traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html ============================================================ + set +x E0621 21:14:45.525000 2055898 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 6 (pid: 2055975) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 [W621 21:14:45.981972702 TCPStore.cpp:115] [c10d] recvVector failed on SocketImpl(fd=4, addr=[fs-mbz-gpu-901]:56266, remote=[fs-mbz-gpu-852]:29500): failed to recv, got 0 bytes Exception raised from recvBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:678 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x1541527785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x15413b65aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa0d0 (0x15413b65c0d0 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5baa81d (0x15413b65c81d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: + 0x5bab4a9 (0x15413b65d4a9 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: c10d::TCPStore::compareSet(std::__cxx11::basic_string, std::allocator > const&, std::vector > const&, std::vector > const&) + 0x1fb (0x15413b6574cb in 
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #6: + 0xc0f919 (0x15414a98b919 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #7: + 0x37f17d (0x15414a0fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #25: + 0x29d90 (0x1541537a8d90 in /lib/x86_64-linux-gnu/libc.so.6) frame #26: __libc_start_main + 0x80 (0x1541537a8e40 in /lib/x86_64-linux-gnu/libc.so.6) W0621 21:14:45.554000 2055898 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-901_2055898_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. [W621 21:14:45.994349358 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=4, addr=[fs-mbz-gpu-901]:56266, remote=[fs-mbz-gpu-852]:29500): Broken pipe Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x1541527785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x15413b65aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa358 (0x15413b65c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5babb3e (0x15413b65db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: c10d::TCPStore::compareSet(std::__cxx11::basic_string, std::allocator > const&, std::vector > const&, std::vector > const&) + 0x299 (0x15413b657569 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: + 0xc0f919 (0x15414a98b919 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #6: + 0x37f17d (0x15414a0fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #24: + 0x29d90 (0x1541537a8d90 in /lib/x86_64-linux-gnu/libc.so.6) frame #25: __libc_start_main + 0x80 (0x1541537a8e40 in /lib/x86_64-linux-gnu/libc.so.6) + set +x W0621 21:14:45.564000 2055898 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-901_2055898_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. 
[W621 21:14:45.003233835 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=4, addr=[fs-mbz-gpu-901]:56266, remote=[fs-mbz-gpu-852]:29500): Broken pipe Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x1541527785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x15413b65aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa358 (0x15413b65c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5babb3e (0x15413b65db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: c10d::TCPStore::compareSet(std::__cxx11::basic_string, std::allocator > const&, std::vector > const&, std::vector > const&) + 0x299 (0x15413b657569 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: + 0xc0f919 (0x15414a98b919 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #6: + 0x37f17d (0x15414a0fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #24: + 0x29d90 (0x1541537a8d90 in /lib/x86_64-linux-gnu/libc.so.6) frame #25: __libc_start_main + 0x80 (0x1541537a8e40 in /lib/x86_64-linux-gnu/libc.so.6) W0621 21:14:45.573000 2055898 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-901_2055898_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. 
Traceback (most recent call last): File "", line 198, in _run_module_as_main File "", line 88, in _run_code File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in main() File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper return arg(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main launch(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch run(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run elastic_launch( File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__ return launch_agent(self._config, self._entrypoint, list(args)) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent raise ChildFailedError( torch.distributed.elastic.multiprocessing.errors.ChildFailedError: ============================================================ ./pretrain_gpt_profile.py FAILED ------------------------------------------------------------ Failures: ------------------------------------------------------------ Root Cause (first observed failure): [0]: time : 2025-06-21_21:14:45 host : fs-mbz-gpu-901 rank : 30 (local_rank: 6) exitcode : 1 (pid: 2055975) error_file: traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html ============================================================ + set +x + for ctx_length in 1024 2048 4096 8192 12288 16384 24576 32768 40960 49152 65536 81920 98304 131072 + export PROF_CTX_LENGTH=40960 + PROF_CTX_LENGTH=40960 + name='/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L40960*tp8.cp4.bs32.json' + '[' -f '/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L40960*tp8.cp4.bs32.json' ']' + echo 'Running ctx_length=40960, TP_SIZE=8, CP_SIZE=4, BATCH_SIZE=32' + srun bash ./attnserver.sh + which python3 + which python3 + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 3 --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 40960 --max-position-embeddings 40960 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/ + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 0 --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 4 
--num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 40960 --max-position-embeddings 40960 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/ + which python3 + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 1 --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 40960 --max-position-embeddings 40960 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/ + which python3 + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 2 --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 40960 --max-position-embeddings 40960 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated and will be removed in future. Use torchrun. Note that --use-env is set by default in torchrun. If your script expects `--local-rank` argument to be set, please change it to read from `os.environ['LOCAL_RANK']` instead. See https://pytorch.org/docs/stable/distributed.html#launch-utility for further instructions main() W0621 21:14:49.465000 138407 site-packages/torch/distributed/run.py:766] W0621 21:14:49.465000 138407 site-packages/torch/distributed/run.py:766] ***************************************** W0621 21:14:49.465000 138407 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. W0621 21:14:49.465000 138407 site-packages/torch/distributed/run.py:766] ***************************************** /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated and will be removed in future. Use torchrun. 
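The FutureWarning above recommends replacing python3 -m torch.distributed.launch with torchrun; the rendezvous flags used here (--nproc_per_node, --nnodes, --node_rank, --rdzv_id, --rdzv_backend, --rdzv_endpoint) carry over, and the worker reads its local rank from the environment instead of a --local-rank argument. A minimal sketch of the env-based pattern the warning describes, assuming one GPU per local rank as in this job:

    import os
    import torch

    # Under torchrun (and launch with --use-env), LOCAL_RANK is set per worker.
    local_rank = int(os.environ["LOCAL_RANK"])
    torch.cuda.set_device(local_rank)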
Note that --use-env is set by default in torchrun. If your script expects `--local-rank` argument to be set, please change it to read from `os.environ['LOCAL_RANK']` instead. See https://pytorch.org/docs/stable/distributed.html#launch-utility for further instructions main() W0621 21:14:49.531000 2057728 site-packages/torch/distributed/run.py:766] W0621 21:14:49.531000 2057728 site-packages/torch/distributed/run.py:766] ***************************************** W0621 21:14:49.531000 2057728 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. W0621 21:14:49.531000 2057728 site-packages/torch/distributed/run.py:766] ***************************************** /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated and will be removed in future. Use torchrun. Note that --use-env is set by default in torchrun. If your script expects `--local-rank` argument to be set, please change it to read from `os.environ['LOCAL_RANK']` instead. See https://pytorch.org/docs/stable/distributed.html#launch-utility for further instructions main() W0621 21:14:49.604000 3358418 site-packages/torch/distributed/run.py:766] W0621 21:14:49.604000 3358418 site-packages/torch/distributed/run.py:766] ***************************************** W0621 21:14:49.604000 3358418 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. W0621 21:14:49.604000 3358418 site-packages/torch/distributed/run.py:766] ***************************************** /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated and will be removed in future. Use torchrun. Note that --use-env is set by default in torchrun. If your script expects `--local-rank` argument to be set, please change it to read from `os.environ['LOCAL_RANK']` instead. See https://pytorch.org/docs/stable/distributed.html#launch-utility for further instructions main() W0621 21:14:49.617000 3427107 site-packages/torch/distributed/run.py:766] W0621 21:14:49.617000 3427107 site-packages/torch/distributed/run.py:766] ***************************************** W0621 21:14:49.617000 3427107 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. W0621 21:14:49.617000 3427107 site-packages/torch/distributed/run.py:766] ***************************************** [rank8]:[W621 21:15:11.752170922 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 8] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank16]:[W621 21:15:11.222490307 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 16] using GPU 0 as device used by this process is currently unknown. 
This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank0]:[W621 21:15:11.899177813 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 0] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank24]:[W621 21:15:12.655585402 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 24] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank12]:[W621 21:15:12.145037804 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 12] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank23]:[W621 21:15:12.598579402 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 23] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank20]:[W621 21:15:12.598836641 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 20] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank18]:[W621 21:15:12.599414011 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 18] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank7]:[W621 21:15:12.260299157 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 7] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank15]:[W621 21:15:12.147071730 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 15] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank11]:[W621 21:15:12.147598402 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 11] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank28]:[W621 21:15:12.669007244 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 28] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank26]:[W621 21:15:12.669129323 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 26] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. 
You can pecify device_id in init_process_group() to force use of a particular device. [rank27]:[W621 21:15:12.669297600 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 27] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank19]:[W621 21:15:12.600768852 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 19] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank10]:[W621 21:15:12.150384860 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 10] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank22]:[W621 21:15:12.606088016 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 22] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank31]:[W621 21:15:12.674364936 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 31] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank30]:[W621 21:15:12.674630074 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 30] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank13]:[W621 21:15:12.153549733 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 13] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank14]:[W621 21:15:12.153586608 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 14] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank9]:[W621 21:15:12.153785485 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 9] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank21]:[W621 21:15:12.607142081 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 21] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank25]:[W621 21:15:12.676248365 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 25] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. 
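The block of warnings above suggests passing device_id to init_process_group() so NCCL knows each rank's GPU up front instead of reporting it as "currently unknown". A minimal sketch; device_id as a parameter is an assumption about recent PyTorch releases, and LOCAL_RANK identifying the GPU matches this launch configuration:

    import os
    import torch
    import torch.distributed as dist

    local_rank = int(os.environ["LOCAL_RANK"])
    device = torch.device(f"cuda:{local_rank}")
    torch.cuda.set_device(device)
    # Pinning the device here removes the rank-to-GPU ambiguity the warning describes.
    dist.init_process_group(backend="nccl", device_id=device)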
You can pecify device_id in init_process_group() to force use of a particular device. [rank17]:[W621 21:15:12.607926300 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 17] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank2]:[W621 21:15:12.269610725 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 2] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank3]:[W621 21:15:12.270154521 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 3] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank4]:[W621 21:15:12.271108997 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 4] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank6]:[W621 21:15:12.271195613 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 6] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank5]:[W621 21:15:12.271405076 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 5] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank29]:[W621 21:15:12.681484655 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 29] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank1]:[W621 21:15:12.278724314 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 1] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. 
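The warning itself points at the fix: bind each process to its local CUDA device and pass that device to init_process_group(). A minimal sketch, assuming the standard LOCAL_RANK variable exported by torchrun (this is not code taken from pretrain_gpt_profile.py):

```python
import os

import torch
import torch.distributed as dist

# Pin this process to its local GPU before any NCCL communicator is created,
# and hand the device to init_process_group so the rank -> GPU mapping is
# known up front (device_id is accepted by recent PyTorch releases).
local_rank = int(os.environ["LOCAL_RANK"])      # set by torchrun
device = torch.device(f"cuda:{local_rank}")
torch.cuda.set_device(device)

dist.init_process_group(backend="nccl", device_id=device)
```

With device_id supplied, ProcessGroupNCCL no longer has to guess the device, so the "rank to GPU mapping" warning above is not printed.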
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
  warnings.warn(
[... this UserWarning is printed once per rank ...]
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
  warnings.warn(
[... this DeprecationWarning is printed once per rank ...]
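Both deprecation notices are harmless, but because every one of the 32 ranks prints its own copy they dominate the log. One common way to de-clutter multi-rank output (a generic sketch, not something pretrain_gpt_profile.py is known to do) is to keep Python warnings only on rank 0:

```python
import os
import warnings

# torchrun exports RANK for every process; suppress warning chatter on all
# ranks except the first so each distinct message appears exactly once.
if int(os.environ.get("RANK", "0")) != 0:
    warnings.filterwarnings("ignore", category=UserWarning)
    warnings.filterwarnings("ignore", category=DeprecationWarning)
```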
[... every rank fails identically while building its batch; rank 1's traceback is representative, and the remaining ranks' tracebacks (interleaved through the rest of the log) differ only in the reported GPU index and the exact memory figures ...]
[rank1]: Traceback (most recent call last):
[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
[rank1]:     pretrain(
[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain
[rank1]:     iteration, num_floating_point_operations_so_far = train(
[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train
[rank1]:     ) = train_step(
[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step
[rank1]:     losses_reduced = forward_backward_func(
[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining
[rank1]:     output_tensor, num_tokens = forward_step(
[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step
[rank1]:     output_tensor, loss_func = forward_step_func(data_iterator, model)
[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step
[rank1]:     (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)
[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch
[rank1]:     batch = next(global_batches)
[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches
[rank1]:     attention_mask = torch.ones(
[rank1]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 51200.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 135.66 GiB is free. Including non-PyTorch memory, this process has 4.15 GiB memory in use. Of the allocated memory 2.56 GiB is allocated by PyTorch, and 79.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation.
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank9]: Traceback (most recent call last): [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank9]: pretrain( [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank9]: iteration, num_floating_point_operations_so_far = train( [rank9]: ^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank9]: ) = train_step( [rank9]: ^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank9]: losses_reduced = forward_backward_func( [rank9]: ^^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank24]: batch = next(global_batches) [rank24]: ^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank24]: attention_mask = torch.ones( [rank24]: ^^^^^^^^^^^ [rank24]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 51200.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 135.66 GiB is free. Including non-PyTorch memory, this process has 4.15 GiB memory in use. Of the allocated memory 2.56 GiB is allocated by PyTorch, and 79.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank18]: Traceback (most recent call last): [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank18]: pretrain( [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank18]: iteration, num_floating_point_operations_so_far = train( [rank18]: ^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank18]: ) = train_step( [rank18]: ^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank18]: losses_reduced = forward_backward_func( [rank18]: ^^^^^^^^^^^^^^^^^^^^^^ [rank6]: Traceback (most recent call last): [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank6]: pretrain( [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank6]: iteration, num_floating_point_operations_so_far = train( [rank6]: ^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank6]: ) = train_step( [rank6]: ^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank6]: losses_reduced = forward_backward_func( [rank6]: ^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank9]: output_tensor, num_tokens = forward_step( [rank9]: ^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank9]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank9]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank9]: batch = next(global_batches) [rank9]: ^^^^^^^^^^^^^^^^^^^^ [rank25]: Traceback (most recent call last): [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank25]: pretrain( [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank25]: iteration, num_floating_point_operations_so_far = train( [rank25]: ^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank25]: ) = train_step( [rank25]: ^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank25]: losses_reduced = forward_backward_func( [rank25]: ^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank18]: output_tensor, num_tokens = forward_step( [rank18]: ^^^^^^^^^^^^^ [rank18]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank18]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank18]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: output_tensor, num_tokens = forward_step( [rank6]: ^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank6]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank6]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank6]: batch = next(global_batches) [rank6]: ^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank9]: attention_mask = torch.ones( [rank9]: ^^^^^^^^^^^ [rank9]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 51200.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 135.65 GiB is free. Including non-PyTorch memory, this process has 4.15 GiB memory in use. Of the allocated memory 2.56 GiB is allocated by PyTorch, and 79.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank25]: output_tensor, num_tokens = forward_step( [rank25]: ^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank25]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank25]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank18]: batch = next(global_batches) [rank18]: ^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank18]: attention_mask = torch.ones( [rank18]: ^^^^^^^^^^^ [rank18]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 51200.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 135.65 GiB is free. Including non-PyTorch memory, this process has 4.15 GiB memory in use. Of the allocated memory 2.56 GiB is allocated by PyTorch, and 79.51 MiB is reserved by PyTorch but unallocated. 
If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank6]: attention_mask = torch.ones( [rank6]: ^^^^^^^^^^^ [rank6]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 51200.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 135.67 GiB is free. Including non-PyTorch memory, this process has 4.13 GiB memory in use. Of the allocated memory 2.56 GiB is allocated by PyTorch, and 79.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank14]: Traceback (most recent call last): [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank14]: pretrain( [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank14]: iteration, num_floating_point_operations_so_far = train( [rank14]: ^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank14]: ) = train_step( [rank14]: ^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank14]: losses_reduced = forward_backward_func( [rank14]: ^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank25]: batch = next(global_batches) [rank25]: ^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank25]: attention_mask = torch.ones( [rank25]: ^^^^^^^^^^^ [rank25]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 51200.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 135.67 GiB is free. Including non-PyTorch memory, this process has 4.13 GiB memory in use. Of the allocated memory 2.56 GiB is allocated by PyTorch, and 79.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank16]: Traceback (most recent call last): [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank16]: pretrain( [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank16]: iteration, num_floating_point_operations_so_far = train( [rank16]: ^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank16]: ) = train_step( [rank16]: ^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank16]: losses_reduced = forward_backward_func( [rank16]: ^^^^^^^^^^^^^^^^^^^^^^ [rank0]: Traceback (most recent call last): [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank0]: pretrain( [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank0]: iteration, num_floating_point_operations_so_far = train( [rank0]: ^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank0]: ) = train_step( [rank0]: ^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank0]: losses_reduced = forward_backward_func( [rank0]: ^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank14]: output_tensor, num_tokens = forward_step( [rank14]: ^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank14]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank14]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank16]: output_tensor, num_tokens = forward_step( [rank16]: ^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank16]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank16]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: output_tensor, num_tokens = forward_step( [rank0]: ^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank0]: output_tensor, loss_func = 
forward_step_func(data_iterator, model) [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank0]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank0]: batch = next(global_batches) [rank0]: ^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank14]: batch = next(global_batches) [rank14]: ^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank14]: attention_mask = torch.ones( [rank14]: ^^^^^^^^^^^ [rank14]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 51200.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 135.64 GiB is free. Including non-PyTorch memory, this process has 4.17 GiB memory in use. Of the allocated memory 2.56 GiB is allocated by PyTorch, and 79.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank16]: batch = next(global_batches) [rank16]: ^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank16]: attention_mask = torch.ones( [rank16]: ^^^^^^^^^^^ [rank16]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 51200.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 135.65 GiB is free. Including non-PyTorch memory, this process has 4.15 GiB memory in use. Of the allocated memory 2.56 GiB is allocated by PyTorch, and 79.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank0]: attention_mask = torch.ones( [rank0]: ^^^^^^^^^^^ [rank0]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 51200.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 135.67 GiB is free. Including non-PyTorch memory, this process has 4.13 GiB memory in use. Of the allocated memory 2.56 GiB is allocated by PyTorch, and 79.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank8]: Traceback (most recent call last): [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank8]: pretrain( [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank8]: iteration, num_floating_point_operations_so_far = train( [rank8]: ^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank8]: ) = train_step( [rank8]: ^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank8]: losses_reduced = forward_backward_func( [rank8]: ^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank8]: output_tensor, num_tokens = forward_step( [rank8]: ^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank8]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank8]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank8]: batch = next(global_batches) [rank8]: ^^^^^^^^^^^^^^^^^^^^ [rank28]: Traceback (most recent call last): [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank28]: pretrain( [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank28]: iteration, num_floating_point_operations_so_far = train( [rank28]: ^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank28]: ) = train_step( [rank28]: ^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank28]: losses_reduced = forward_backward_func( [rank28]: ^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank8]: attention_mask = torch.ones( [rank8]: ^^^^^^^^^^^ [rank8]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 51200.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 135.64 GiB is free. Including non-PyTorch memory, this process has 4.17 GiB memory in use. Of the allocated memory 2.56 GiB is allocated by PyTorch, and 79.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank28]: output_tensor, num_tokens = forward_step( [rank28]: ^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank28]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank28]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank28]: batch = next(global_batches) [rank28]: ^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank28]: attention_mask = torch.ones( [rank28]: ^^^^^^^^^^^ [rank28]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 51200.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 135.66 GiB is free. Including non-PyTorch memory, this process has 4.15 GiB memory in use. Of the allocated memory 2.56 GiB is allocated by PyTorch, and 79.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank20]: Traceback (most recent call last): [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank20]: pretrain( [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank20]: iteration, num_floating_point_operations_so_far = train( [rank20]: ^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank20]: ) = train_step( [rank20]: ^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank20]: losses_reduced = forward_backward_func( [rank20]: ^^^^^^^^^^^^^^^^^^^^^^ [rank27]: Traceback (most recent call last): [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank27]: pretrain( [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank27]: iteration, num_floating_point_operations_so_far = train( [rank27]: ^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank27]: ) = train_step( [rank27]: ^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank27]: losses_reduced = forward_backward_func( [rank27]: ^^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank20]: output_tensor, num_tokens = forward_step( [rank20]: 
^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank20]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank20]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank27]: output_tensor, num_tokens = forward_step( [rank27]: ^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank27]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank27]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank20]: batch = next(global_batches) [rank20]: ^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank20]: attention_mask = torch.ones( [rank20]: ^^^^^^^^^^^ [rank20]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 51200.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 135.65 GiB is free. Including non-PyTorch memory, this process has 4.15 GiB memory in use. Of the allocated memory 2.56 GiB is allocated by PyTorch, and 79.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank27]: batch = next(global_batches) [rank27]: ^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank27]: attention_mask = torch.ones( [rank27]: ^^^^^^^^^^^ [rank27]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 51200.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 135.67 GiB is free. Including non-PyTorch memory, this process has 4.13 GiB memory in use. Of the allocated memory 2.56 GiB is allocated by PyTorch, and 79.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank23]: Traceback (most recent call last): [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank23]: pretrain( [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank23]: iteration, num_floating_point_operations_so_far = train( [rank23]: ^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank23]: ) = train_step( [rank23]: ^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank23]: losses_reduced = forward_backward_func( [rank23]: ^^^^^^^^^^^^^^^^^^^^^^ [rank30]: Traceback (most recent call last): [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank30]: pretrain( [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank30]: iteration, num_floating_point_operations_so_far = train( [rank30]: ^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank30]: ) = train_step( [rank30]: ^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank30]: losses_reduced = forward_backward_func( [rank30]: ^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank23]: output_tensor, num_tokens = forward_step( [rank23]: ^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank23]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank23]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank30]: output_tensor, num_tokens = forward_step( [rank30]: ^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank30]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank30]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank23]: batch = next(global_batches) [rank23]: ^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank23]: attention_mask = torch.ones( [rank23]: ^^^^^^^^^^^ [rank23]: torch.OutOfMemoryError: CUDA 
out of memory. Tried to allocate 51200.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 135.64 GiB is free. Including non-PyTorch memory, this process has 4.17 GiB memory in use. Of the allocated memory 2.56 GiB is allocated by PyTorch, and 79.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank30]: batch = next(global_batches) [rank30]: ^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank30]: attention_mask = torch.ones( [rank30]: ^^^^^^^^^^^ [rank30]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 51200.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 135.66 GiB is free. Including non-PyTorch memory, this process has 4.15 GiB memory in use. Of the allocated memory 2.56 GiB is allocated by PyTorch, and 79.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank26]: Traceback (most recent call last): [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank26]: pretrain( [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank26]: iteration, num_floating_point_operations_so_far = train( [rank26]: ^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank26]: ) = train_step( [rank26]: ^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank26]: losses_reduced = forward_backward_func( [rank26]: ^^^^^^^^^^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank26]: output_tensor, num_tokens = forward_step( [rank26]: ^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank26]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank26]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank26]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank26]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank26]: batch = next(global_batches) [rank26]: ^^^^^^^^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank26]: attention_mask = torch.ones( [rank26]: ^^^^^^^^^^^ [rank26]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 51200.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 135.66 GiB is free. 
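The failed 51200.00 GiB allocation above comes from materializing a dense attention mask with torch.ones inside setup_batches. That single request is more than 360 times the 139.81 GiB capacity of one GPU, so the PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True hint in the message (which only mitigates allocator fragmentation) cannot rescue it. Below is a minimal sketch of the arithmetic, assuming a (batch, 1, seq_len, seq_len) float32 mask; the actual shape and dtype used at pretrain_gpt_profile.py line 226 are not visible in this log.

# Hypothetical helper, not taken from pretrain_gpt_profile.py: footprint of a
# dense attention mask of shape (batch, 1, seq_len, seq_len), in GiB.
def dense_mask_gib(batch: int, seq_len: int, bytes_per_elem: int = 4) -> float:
    return batch * seq_len * seq_len * bytes_per_elem / 1024**3

if __name__ == "__main__":
    # The footprint grows quadratically with sequence length, which is why a
    # long-context profiling run can request tens of TiB for the mask alone.
    for seq_len in (8_192, 131_072, 1_048_576):
        print(f"seq_len={seq_len:>9,}: {dense_mask_gib(batch=1, seq_len=seq_len):10.2f} GiB")

At 4 bytes per element, 51200 GiB corresponds to roughly 1.4e13 mask elements, which points toward shrinking the profiled sequence length or micro-batch, building the mask lazily per micro-batch, or not materializing a dense mask at all and relying on causal masking inside the attention kernel.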
[rank1]:[W621 21:15:23.240853683 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
[the same ProcessGroupNCCL warning is then repeated by ranks 2, 3, 4, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 17, 18, 19, 20, 21, 22, 23, 25, 26, 27, 28, 29, 30, and 31]
W0621 21:15:24.244000 138407 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 138480 closing signal SIGTERM
[the elastic agents 138407, 2057728, 3358418, and 3427107 each log the same "Sending process <pid> closing signal SIGTERM" line for the rest of their local worker processes]
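The ProcessGroupNCCL warnings above are the expected side effect of the workers dying mid-iteration: the OutOfMemoryError escapes pretrain() and the interpreter exits without tearing down the NCCL process group. A minimal sketch of the usual guard, assuming the entrypoint initializes torch.distributed itself (the main() name here is illustrative, not the actual structure of pretrain_gpt_profile.py):

import torch.distributed as dist

def main() -> None:
    # Training entrypoint; may raise (e.g. torch.OutOfMemoryError) mid-step.
    ...

if __name__ == "__main__":
    try:
        main()
    finally:
        # Destroy the process group even on failure so NCCL resources are
        # released and the shutdown warning above is not emitted at exit.
        if dist.is_available() and dist.is_initialized():
            dist.destroy_process_group()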
E0621 21:15:24.690000 138407 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 4 (pid: 138484) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
Traceback (most recent call last):
  File "<frozen runpy>", line 198, in _run_module_as_main
  File "<frozen runpy>", line 88, in _run_code
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in <module>
    main()
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
    return arg(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main
    launch(args)
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch
    run(args)
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run
    elastic_launch(
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__
    return launch_agent(self._config, self._entrypoint, list(args))
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent
    raise ChildFailedError(
torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
============================================================
./pretrain_gpt_profile.py FAILED
------------------------------------------------------------
Failures:
  <NO_OTHER_FAILURES>
------------------------------------------------------------
Root Cause (first observed failure):
[0]:
  time      : 2025-06-21_21:15:24
  host      : fs-mbz-gpu-852
  rank      : 4 (local_rank: 4)
  exitcode  : 1 (pid: 138484)
  error_file: <N/A>
  traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
============================================================
E0621 21:15:24.833000 2057728 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 7 (pid: 2057805) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
W0621 21:15:24.845000 2057728 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-901_2057728_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError.
[W621 21:15:24.285214682 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-901]:34792, remote=[fs-mbz-gpu-852]:29500): Broken pipe
Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first):
frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x151b68d785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
frame #1: + 0x5ba8afe (0x151b5205aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
frame #2: + 0x5baa358 (0x151b5205c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
frame #3: + 0x5babb3e (0x151b5205db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
frame #4: c10d::TCPStore::doWait(c10::ArrayRef, std::allocator > >, std::chrono::duration >) + 0x1a6 (0x151b52057ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string, std::allocator > const&) + 0x33 (0x151b52057ea3 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
frame #6: c10d::TCPStore::get(std::__cxx11::basic_string, std::allocator > const&) + 0xab (0x151b52058f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
frame #7: + 0xc0f526 (0x151b6138b526 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
frame #8: + 0x37f17d (0x151b60afb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
frame #26: + 0x29d90 (0x151b6a054d90 in /lib/x86_64-linux-gnu/libc.so.6)
frame #27: __libc_start_main + 0x80 (0x151b6a054e40 in /lib/x86_64-linux-gnu/libc.so.6)
W0621 21:15:24.857000 2057728 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-901_2057728_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError.
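In the ChildFailedError summary above, error_file is empty and the traceback field only points at the elastic errors documentation. One way to get the worker's Python traceback propagated into that summary, per the linked docs, is to wrap the script's entrypoint with torch.distributed.elastic's record decorator. A minimal sketch, assuming the script exposes a single entrypoint function (main() here is illustrative, not the real structure of pretrain_gpt_profile.py):

from torch.distributed.elastic.multiprocessing.errors import record

@record
def main() -> None:
    # Any exception raised here is written to the error file that the elastic
    # agent reads, so the ChildFailedError summary shows the worker's traceback
    # instead of "error_file: <N/A>".
    raise RuntimeError("example failure surfaced to the elastic agent")

if __name__ == "__main__":
    main()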
[W621 21:15:24.296807586 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-901]:34792, remote=[fs-mbz-gpu-852]:29500): Broken pipe Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x151b68d785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x151b5205aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa358 (0x151b5205c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5babb3e (0x151b5205db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: c10d::TCPStore::doWait(c10::ArrayRef, std::allocator > >, std::chrono::duration >) + 0x1a6 (0x151b52057ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string, std::allocator > const&) + 0x33 (0x151b52057ea3 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #6: c10d::TCPStore::get(std::__cxx11::basic_string, std::allocator > const&) + 0xab (0x151b52058f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #7: + 0xc0f526 (0x151b6138b526 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #8: + 0x37f17d (0x151b60afb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #26: + 0x29d90 (0x151b6a054d90 in /lib/x86_64-linux-gnu/libc.so.6) frame #27: __libc_start_main + 0x80 (0x151b6a054e40 in /lib/x86_64-linux-gnu/libc.so.6) W0621 21:15:24.868000 2057728 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-901_2057728_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. 
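[editor's note] The RendezvousConnectionError / "Broken pipe" warnings above are secondary failures: the c10d rendezvous store for id 343200 is hosted by the agent on fs-mbz-gpu-852:29500 (the --rdzv_endpoint seen later in this log), so once that node tears down, the follower agents on fs-mbz-gpu-870/881/901 can no longer reach the store to shut the rendezvous down cleanly. A hedged sketch that probes the same endpoint from a client is below; the host and port come from the log, the key name is made up for illustration.

    # Sketch: connect to the c10d TCPStore the agents were using and show that any
    # read fails once the hosting agent on fs-mbz-gpu-852 has exited.
    from datetime import timedelta
    import torch.distributed as dist

    try:
        store = dist.TCPStore(
            "fs-mbz-gpu-852", 29500,
            is_master=False,                 # connect as a client, like the follower agents
            timeout=timedelta(seconds=10),
        )
        store.get("rdzv_343200_probe_key")   # hypothetical key; raises when the store is gone
    except Exception as exc:
        print(f"rendezvous store unreachable: {exc}")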
Traceback (most recent call last): File "", line 198, in _run_module_as_main File "", line 88, in _run_code File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in main() File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper return arg(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main launch(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch run(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run elastic_launch( File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__ return launch_agent(self._config, self._entrypoint, list(args)) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent raise ChildFailedError( torch.distributed.elastic.multiprocessing.errors.ChildFailedError: ============================================================ ./pretrain_gpt_profile.py FAILED ------------------------------------------------------------ Failures: ------------------------------------------------------------ Root Cause (first observed failure): [0]: time : 2025-06-21_21:15:24 host : fs-mbz-gpu-901 rank : 31 (local_rank: 7) exitcode : 1 (pid: 2057805) error_file: traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html ============================================================ E0621 21:15:24.916000 3427107 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 5 (pid: 3427182) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 E0621 21:15:24.930000 3358418 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 1 (pid: 3358507) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 [W621 21:15:24.854040456 TCPStore.cpp:115] [c10d] recvVector failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-870]:37160, remote=[fs-mbz-gpu-852]:29500): failed to recv, got 0 bytes Exception raised from recvBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:678 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x14f0ccd785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x14f0b605aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa0d0 (0x14f0b605c0d0 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5baa81d (0x14f0b605c81d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: + 0x5bab4a9 (0x14f0b605d4a9 in 
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: c10d::TCPStore::compareSet(std::__cxx11::basic_string, std::allocator > const&, std::vector > const&, std::vector > const&) + 0x1fb (0x14f0b60574cb in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #6: + 0xc0f919 (0x14f0c538b919 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #7: + 0x37f17d (0x14f0c4afb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #25: + 0x29d90 (0x14f0ce057d90 in /lib/x86_64-linux-gnu/libc.so.6) frame #26: __libc_start_main + 0x80 (0x14f0ce057e40 in /lib/x86_64-linux-gnu/libc.so.6) W0621 21:15:24.943000 3358418 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-881_3358418_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. W0621 21:15:24.947000 3427107 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-870_3427107_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. [W621 21:15:24.314980201 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-881]:44216, remote=[fs-mbz-gpu-852]:29500): Broken pipe Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x1495e3f785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x1495cce5aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa358 (0x1495cce5c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5babb3e (0x1495cce5db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: c10d::TCPStore::doWait(c10::ArrayRef, std::allocator > >, std::chrono::duration >) + 0x1a6 (0x1495cce57ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string, std::allocator > const&) + 0x33 (0x1495cce57ea3 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #6: c10d::TCPStore::get(std::__cxx11::basic_string, std::allocator > const&) + 0xab (0x1495cce58f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #7: + 0xc0f526 (0x1495dc18b526 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #8: + 0x37f17d (0x1495db8fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #26: + 0x29d90 (0x1495e4fb9d90 in /lib/x86_64-linux-gnu/libc.so.6) frame #27: __libc_start_main + 0x80 (0x1495e4fb9e40 in /lib/x86_64-linux-gnu/libc.so.6) [W621 
21:15:24.866168863 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-870]:37160, remote=[fs-mbz-gpu-852]:29500): Broken pipe Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x14f0ccd785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x14f0b605aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa358 (0x14f0b605c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5babb3e (0x14f0b605db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: c10d::TCPStore::compareSet(std::__cxx11::basic_string, std::allocator > const&, std::vector > const&, std::vector > const&) + 0x299 (0x14f0b6057569 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: + 0xc0f919 (0x14f0c538b919 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #6: + 0x37f17d (0x14f0c4afb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #24: + 0x29d90 (0x14f0ce057d90 in /lib/x86_64-linux-gnu/libc.so.6) frame #25: __libc_start_main + 0x80 (0x14f0ce057e40 in /lib/x86_64-linux-gnu/libc.so.6) W0621 21:15:24.956000 3358418 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-881_3358418_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. W0621 21:15:24.958000 3427107 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-870_3427107_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. 
+ set +x [W621 21:15:24.875593012 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-870]:37160, remote=[fs-mbz-gpu-852]:29500): Broken pipe Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x14f0ccd785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x14f0b605aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa358 (0x14f0b605c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5babb3e (0x14f0b605db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: c10d::TCPStore::compareSet(std::__cxx11::basic_string, std::allocator > const&, std::vector > const&, std::vector > const&) + 0x299 (0x14f0b6057569 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: + 0xc0f919 (0x14f0c538b919 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #6: + 0x37f17d (0x14f0c4afb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) [W621 21:15:24.326862762 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-881]:44216, remote=[fs-mbz-gpu-852]:29500): Broken pipe Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x1495e3f785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x1495cce5aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa358 (0x1495cce5c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #24: + 0x29d90 (0x14f0ce057d90 in /lib/x86_64-linux-gnu/libc.so.6) frame #25: __libc_start_main + 0x80 (0x14f0ce057e40 in /lib/x86_64-linux-gnu/libc.so.6) frame #3: + 0x5babb3e (0x1495cce5db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: c10d::TCPStore::doWait(c10::ArrayRef, std::allocator > >, std::chrono::duration >) + 0x1a6 (0x1495cce57ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string, std::allocator > const&) + 0x33 (0x1495cce57ea3 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #6: c10d::TCPStore::get(std::__cxx11::basic_string, std::allocator > const&) + 0xab (0x1495cce58f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #7: + 0xc0f526 (0x1495dc18b526 in 
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #8: + 0x37f17d (0x1495db8fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #26: + 0x29d90 (0x1495e4fb9d90 in /lib/x86_64-linux-gnu/libc.so.6) frame #27: __libc_start_main + 0x80 (0x1495e4fb9e40 in /lib/x86_64-linux-gnu/libc.so.6) W0621 21:15:24.967000 3427107 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-870_3427107_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. W0621 21:15:24.967000 3358418 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-881_3358418_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. Traceback (most recent call last): File "", line 198, in _run_module_as_main Traceback (most recent call last): File "", line 198, in _run_module_as_main File "", line 88, in _run_code File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in File "", line 88, in _run_code File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in main() main() File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper return arg(*args, **kwargs) return arg(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main ^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main launch(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch launch(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch run(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run run(args) elastic_launch( File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run return launch_agent(self._config, self._entrypoint, list(args)) elastic_launch( File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent return launch_agent(self._config, self._entrypoint, list(args)) raise ChildFailedError( torch.distributed.elastic.multiprocessing.errors.ChildFailedError: 
============================================================ ./pretrain_gpt_profile.py FAILED ------------------------------------------------------------ Failures: ------------------------------------------------------------ Root Cause (first observed failure): [0]: time : 2025-06-21_21:15:24 host : fs-mbz-gpu-870 rank : 13 (local_rank: 5) exitcode : 1 (pid: 3427182) error_file: traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html ============================================================ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent raise ChildFailedError( torch.distributed.elastic.multiprocessing.errors.ChildFailedError: ============================================================ ./pretrain_gpt_profile.py FAILED ------------------------------------------------------------ Failures: [1]: time : 2025-06-21_21:15:24 host : fs-mbz-gpu-881 rank : 20 (local_rank: 4) exitcode : 1 (pid: 3358510) error_file: traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html ------------------------------------------------------------ Root Cause (first observed failure): [0]: time : 2025-06-21_21:15:24 host : fs-mbz-gpu-881 rank : 17 (local_rank: 1) exitcode : 1 (pid: 3358507) error_file: traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html ============================================================ + set +x + set +x + set +x + for ctx_length in 1024 2048 4096 8192 12288 16384 24576 32768 40960 49152 65536 81920 98304 131072 + export PROF_CTX_LENGTH=49152 + PROF_CTX_LENGTH=49152 + name='/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L49152*tp8.cp4.bs32.json' + '[' -f '/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L49152*tp8.cp4.bs32.json' ']' + echo 'Running ctx_length=49152, TP_SIZE=8, CP_SIZE=4, BATCH_SIZE=32' + srun bash ./attnserver.sh + which python3 + which python3 + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 2 --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 49152 --max-position-embeddings 49152 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/ + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 1 --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 49152 --max-position-embeddings 49152 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 
--eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/ + which python3 + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 0 --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 49152 --max-position-embeddings 49152 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/ + which python3 + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 3 --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 49152 --max-position-embeddings 49152 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated and will be removed in future. Use torchrun. Note that --use-env is set by default in torchrun. If your script expects `--local-rank` argument to be set, please change it to read from `os.environ['LOCAL_RANK']` instead. See https://pytorch.org/docs/stable/distributed.html#launch-utility for further instructions main() W0621 21:15:28.841000 140337 site-packages/torch/distributed/run.py:766] W0621 21:15:28.841000 140337 site-packages/torch/distributed/run.py:766] ***************************************** W0621 21:15:28.841000 140337 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. W0621 21:15:28.841000 140337 site-packages/torch/distributed/run.py:766] ***************************************** /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated and will be removed in future. Use torchrun. Note that --use-env is set by default in torchrun. If your script expects `--local-rank` argument to be set, please change it to read from `os.environ['LOCAL_RANK']` instead. 
See https://pytorch.org/docs/stable/distributed.html#launch-utility for further instructions main() W0621 21:15:28.898000 3360313 site-packages/torch/distributed/run.py:766] W0621 21:15:28.898000 3360313 site-packages/torch/distributed/run.py:766] ***************************************** W0621 21:15:28.898000 3360313 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. W0621 21:15:28.898000 3360313 site-packages/torch/distributed/run.py:766] ***************************************** /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated and will be removed in future. Use torchrun. Note that --use-env is set by default in torchrun. If your script expects `--local-rank` argument to be set, please change it to read from `os.environ['LOCAL_RANK']` instead. See https://pytorch.org/docs/stable/distributed.html#launch-utility for further instructions main() W0621 21:15:28.949000 3429074 site-packages/torch/distributed/run.py:766] W0621 21:15:28.949000 3429074 site-packages/torch/distributed/run.py:766] ***************************************** W0621 21:15:28.949000 3429074 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. W0621 21:15:28.949000 3429074 site-packages/torch/distributed/run.py:766] ***************************************** /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated and will be removed in future. Use torchrun. Note that --use-env is set by default in torchrun. If your script expects `--local-rank` argument to be set, please change it to read from `os.environ['LOCAL_RANK']` instead. See https://pytorch.org/docs/stable/distributed.html#launch-utility for further instructions main() W0621 21:15:28.986000 2059620 site-packages/torch/distributed/run.py:766] W0621 21:15:28.986000 2059620 site-packages/torch/distributed/run.py:766] ***************************************** W0621 21:15:28.986000 2059620 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. W0621 21:15:28.986000 2059620 site-packages/torch/distributed/run.py:766] ***************************************** [rank24]:[W621 21:15:51.098146307 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 24] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank16]:[W621 21:15:51.034374172 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 16] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. 
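[editor's note] The FutureWarning printed by every agent above says torch.distributed.launch is deprecated in favor of torchrun, and that scripts should stop expecting a --local-rank argument and read the rank from the environment instead. A minimal sketch of that script-side change, with illustrative variable names, is:

    # Sketch: take the local rank from the launcher-provided environment variable
    # instead of a --local-rank CLI flag, as the FutureWarning suggests.
    import os
    import torch

    local_rank = int(os.environ.get("LOCAL_RANK", "0"))
    torch.cuda.set_device(local_rank)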
[rank0]:[W621 21:15:51.725850016 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 0] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank8]:[W621 21:15:51.898646812 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 8] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank2]:[W621 21:15:51.031924960 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 2] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank26]:[W621 21:15:52.440579833 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 26] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank18]:[W621 21:15:52.372717456 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 18] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank1]:[W621 21:15:52.034416139 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 1] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank10]:[W621 21:15:52.920201153 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 10] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank3]:[W621 21:15:52.036057488 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 3] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank9]:[W621 21:15:52.923220910 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 9] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank25]:[W621 21:15:52.444625218 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 25] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank31]:[W621 21:15:52.444651610 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 31] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. 
[rank15]:[W621 21:15:52.923293061 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 15] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank7]:[W621 21:15:52.036962205 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 7] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank17]:[W621 21:15:52.376911217 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 17] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank23]:[W621 21:15:52.377055031 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 23] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank11]:[W621 21:15:52.924245716 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 11] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank27]:[W621 21:15:52.445780390 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 27] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank19]:[W621 21:15:52.378031243 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 19] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank29]:[W621 21:15:52.455715297 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 29] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank28]:[W621 21:15:52.456652012 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 28] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank30]:[W621 21:15:52.456901804 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 30] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank20]:[W621 21:15:52.389777904 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 20] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. 
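[editor's note] Every rank prints the ProcessGroupNCCL warning above because the process group is created before the process has been bound to its GPU, so the rank-to-device mapping is unknown at that point. The warning itself names the fix: pass device_id to init_process_group(). A hedged sketch is below; where this would go inside pretrain_gpt_profile.py / Megatron's initialization is an assumption, and it presumes the usual launcher environment (LOCAL_RANK, MASTER_ADDR, etc.) is set.

    # Sketch: pin the process to its GPU and tell NCCL which device this rank owns,
    # which avoids the "rank to GPU mapping is currently unknown" warning.
    import os
    import torch
    import torch.distributed as dist

    local_rank = int(os.environ["LOCAL_RANK"])
    device = torch.device(f"cuda:{local_rank}")
    torch.cuda.set_device(device)
    dist.init_process_group(backend="nccl", device_id=device)  # device_id as referenced by the warning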
[rank14]:[W621 21:15:52.936971727 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 14] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank21]:[W621 21:15:52.389851651 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 21] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank5]:[W621 21:15:52.050446524 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 5] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank6]:[W621 21:15:52.050620074 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 6] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank13]:[W621 21:15:52.938377533 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 13] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank22]:[W621 21:15:52.391310245 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 22] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank12]:[W621 21:15:52.938470404 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 12] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank4]:[W621 21:15:52.053583368 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 4] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. 
Please update your code accordingly.
  warnings.warn(
[identical gpt_layer_specs.py:94 UserWarning repeated once per rank; duplicates omitted]
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
  warnings.warn(
[identical cpu_offload.py:595 DeprecationWarning repeated once per rank; duplicates omitted]
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
[rank6]: Traceback (most recent call last):
[rank6]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
[rank6]:     pretrain(
[rank6]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain
[rank6]:     iteration, num_floating_point_operations_so_far = train(
[rank6]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train
[rank6]:     ) = train_step(
[rank6]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step
[rank6]:     losses_reduced = forward_backward_func(
[rank6]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining
[rank6]:     output_tensor, num_tokens = forward_step(
[rank6]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step
[rank6]:     output_tensor, loss_func = forward_step_func(data_iterator, model)
[rank6]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step
[rank6]:     (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)
[rank6]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch
[rank6]:     batch = next(global_batches)
[rank6]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches
[rank6]:     attention_mask = torch.ones(
[rank6]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 73728.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 135.17 GiB is free. Including non-PyTorch memory, this process has 4.63 GiB memory in use. Of the allocated memory 2.96 GiB is allocated by PyTorch, and 175.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
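For scale (an illustration with assumed shapes, since the source of pretrain_gpt_profile.py is not shown in this log): a dense attention mask of shape [micro_batch_size, 1, seq_len, seq_len], the shape Megatron-style get_batch helpers commonly build, grows quadratically with sequence length, which is how a single torch.ones call can request tens of TiB:

    # Sketch only: estimate the size of a dense [b, 1, s, s] mask.
    # The shape and 1-byte (bool) element size are assumptions for illustration.
    def mask_gib(micro_batch_size: int, seq_len: int, bytes_per_elem: int = 1) -> float:
        return micro_batch_size * seq_len * seq_len * bytes_per_elem / 1024**3

    for s in (32_768, 131_072, 1_048_576):
        print(f"seq_len={s:,}: {mask_gib(1, s):,.1f} GiB")
    # seq_len=32,768: 1.0 GiB
    # seq_len=131,072: 16.0 GiB
    # seq_len=1,048,576: 1,024.0 GiB

The 73728.00 GiB request above is consistent with this quadratic growth at the very long sequence length being profiled; the exact micro-batch size, sequence length, and dtype are not visible in this log.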
Every other rank captured in this segment (ranks 0-5, 7-10, 14-17, 21-31) aborts with the identical traceback and the same torch.OutOfMemoryError: each tries to allocate 73728.00 GiB while building attention_mask with torch.ones in setup_batches (pretrain_gpt_profile.py, line 226). Only the per-process details vary: the local GPU index (0-7), free memory (135.14-135.17 GiB of the 139.81 GiB capacity), and total process memory in use (4.63-4.67 GiB). Each message ends with the same allocator hint: "If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)". Ranks 13 and 19 begin the same traceback but their output is cut off at the end of this segment; the remaining interleaved duplicate tracebacks are omitted.
^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank19]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank19]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank13]: batch = next(global_batches) [rank13]: ^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank13]: attention_mask = torch.ones( [rank13]: ^^^^^^^^^^^ [rank13]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 73728.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 135.15 GiB is free. Including non-PyTorch memory, this process has 4.65 GiB memory in use. Of the allocated memory 2.96 GiB is allocated by PyTorch, and 175.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank19]: batch = next(global_batches) [rank19]: ^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank19]: attention_mask = torch.ones( [rank19]: ^^^^^^^^^^^ [rank19]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 73728.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 135.14 GiB is free. Including non-PyTorch memory, this process has 4.67 GiB memory in use. Of the allocated memory 2.96 GiB is allocated by PyTorch, and 175.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank12]: Traceback (most recent call last): [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank12]: pretrain( [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank12]: iteration, num_floating_point_operations_so_far = train( [rank12]: ^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank12]: ) = train_step( [rank12]: ^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank12]: losses_reduced = forward_backward_func( [rank12]: ^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank12]: output_tensor, num_tokens = forward_step( [rank12]: ^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank12]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank12]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank12]: batch = next(global_batches) [rank12]: ^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank12]: attention_mask = torch.ones( [rank12]: ^^^^^^^^^^^ [rank12]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 73728.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 135.14 GiB is free. Including non-PyTorch memory, this process has 4.67 GiB memory in use. Of the allocated memory 2.96 GiB is allocated by PyTorch, and 175.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank18]: Traceback (most recent call last): [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank18]: pretrain( [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank18]: iteration, num_floating_point_operations_so_far = train( [rank18]: ^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank18]: ) = train_step( [rank18]: ^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank18]: losses_reduced = forward_backward_func( [rank18]: ^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank18]: output_tensor, num_tokens = forward_step( [rank18]: ^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank18]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank18]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank18]: batch = next(global_batches) [rank18]: ^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank18]: attention_mask = torch.ones( [rank18]: ^^^^^^^^^^^ [rank18]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 73728.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 135.15 GiB is free. Including non-PyTorch memory, this process has 4.65 GiB memory in use. Of the allocated memory 2.96 GiB is allocated by PyTorch, and 175.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank20]: Traceback (most recent call last): [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank20]: pretrain( [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank20]: iteration, num_floating_point_operations_so_far = train( [rank20]: ^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank20]: ) = train_step( [rank20]: ^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank20]: losses_reduced = forward_backward_func( [rank20]: ^^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank20]: output_tensor, num_tokens = forward_step( [rank20]: ^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank20]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank20]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank20]: batch = next(global_batches) [rank20]: ^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank20]: attention_mask = torch.ones( [rank20]: ^^^^^^^^^^^ [rank20]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 73728.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 135.15 GiB is free. Including non-PyTorch memory, this process has 4.65 GiB memory in use. Of the allocated memory 2.96 GiB is allocated by PyTorch, and 175.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank11]: Traceback (most recent call last): [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank11]: pretrain( [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank11]: iteration, num_floating_point_operations_so_far = train( [rank11]: ^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank11]: ) = train_step( [rank11]: ^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank11]: losses_reduced = forward_backward_func( [rank11]: ^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank11]: output_tensor, num_tokens = forward_step( [rank11]: ^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank11]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank11]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank11]: batch = next(global_batches) [rank11]: ^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank11]: attention_mask = torch.ones( [rank11]: ^^^^^^^^^^^ [rank11]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 73728.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 135.15 GiB is free. Including non-PyTorch memory, this process has 4.65 GiB memory in use. Of the allocated memory 2.96 GiB is allocated by PyTorch, and 175.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank23]:[W621 21:16:03.519661320 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank31]:[W621 21:16:03.608602522 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank5]:[W621 21:16:03.224347128 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. 
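The failed allocation comes from materializing a dense attention mask inside setup_batches. A dense [batch, 1, seq, seq] mask grows quadratically with sequence length, so the step from the shorter context lengths earlier in the sweep to 49152 tokens is enough to push a single torch.ones request far past the 139.81 GiB available on each GPU. The sketch below is illustrative only: the exact shape and dtype that setup_batches passes to torch.ones are not visible in this log, and the 73728.00 GiB figure suggests masks for many batches are being built at once.

    # Illustrative estimate only: assumes a dense [batch, 1, seq, seq] mask of 1-byte
    # elements, one common way Megatron-style get_batch() code builds attention masks.
    # The actual shape used by setup_batches() in pretrain_gpt_profile.py is an assumption.
    def dense_mask_gib(batch_size: int, seq_len: int, bytes_per_elem: int = 1) -> float:
        """GiB needed for a dense [batch, 1, seq, seq] attention mask."""
        return batch_size * seq_len * seq_len * bytes_per_elem / 1024**3

    if __name__ == "__main__":
        # batch of 32 mirrors the bs32 tag in the trace-file name; purely illustrative
        for seq in (8192, 16384, 32768, 49152, 65536):
            print(f"seq={seq:6d}  dense mask ~ {dense_mask_gib(32, seq):8.1f} GiB")
        # Growth is quadratic in seq, so doubling the context length quadruples the mask.
        # PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True (suggested in the error text)
        # only mitigates fragmentation; it cannot satisfy a single multi-TiB request.

If this fork keeps upstream Megatron-LM's --no-create-attention-mask-in-dataloader option, skipping the dense mask and letting the fused attention kernel apply causal masking avoids the allocation entirely; that is an assumption about this code base, not something shown in the log.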
[rank1]:[W621 21:16:03.236581228 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
(Every rank on all four nodes prints the same destroy_process_group() warning as it exits.)
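The destroy_process_group() warnings here are a side effect of the workers dying mid-step, but they also appear on clean exits whenever the script never tears down the default process group. A minimal sketch of the shutdown the warning asks for, assuming torch.distributed was initialized by the training entry point:

    import torch.distributed as dist

    def shutdown_distributed() -> None:
        """Tear down the default process group so NCCL resources are released before exit."""
        if dist.is_available() and dist.is_initialized():
            dist.destroy_process_group()

    # e.g. wrap the existing entry point so shutdown happens even on failure:
    #     try:
    #         pretrain(...)
    #     finally:
    #         shutdown_distributed()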
W0621 21:16:04.247000 140337 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 140408 closing signal SIGTERM
(The elastic agents on all four nodes -- PIDs 140337, 2059620, 3429074 and 3360313 -- send the same closing SIGTERM to each of their surviving workers.)
E0621 21:16:04.668000 140337 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 1 (pid: 140409) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
E0621 21:16:04.701000 2059620 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 7 (pid: 2059696) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
E0621 21:16:04.749000 3429074 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 5 (pid: 3429149) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
E0621 21:16:04.751000 3360313 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 5 (pid: 3360387) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
Traceback (most recent call last):
  File "<frozen runpy>", line 198, in _run_module_as_main
  File "<frozen runpy>", line 88, in _run_code
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in <module>
    main()
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
    return arg(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main
    launch(args)
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch
    run(args)
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run
    elastic_launch(
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__
    return launch_agent(self._config, self._entrypoint, list(args))
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent
    raise ChildFailedError(
torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
============================================================
./pretrain_gpt_profile.py FAILED
------------------------------------------------------------
Failures:
------------------------------------------------------------
Root Cause (first observed failure):
[0]:
  time      : 2025-06-21_21:16:04
  host      : fs-mbz-gpu-852
  rank      : 1 (local_rank: 1)
  exitcode  : 1 (pid: 140409)
  error_file:
  traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
============================================================
(The launchers on fs-mbz-gpu-901, fs-mbz-gpu-870 and fs-mbz-gpu-881 raise the same ChildFailedError, with root causes rank 31 (local_rank: 7, pid: 2059696), rank 13 (local_rank: 5, pid: 3429149) and rank 21 (local_rank: 5, pid: 3360387) respectively.)
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__ run(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run return launch_agent(self._config, self._entrypoint, list(args)) elastic_launch( File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent raise ChildFailedError( torch.distributed.elastic.multiprocessing.errors.ChildFailedError: ============================================================ ./pretrain_gpt_profile.py FAILED ------------------------------------------------------------ Failures: ------------------------------------------------------------ Root Cause (first observed failure): [0]: time : 2025-06-21_21:16:04 host : fs-mbz-gpu-870 rank : 13 (local_rank: 5) exitcode : 1 (pid: 3429149) error_file: traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html ============================================================ return launch_agent(self._config, self._entrypoint, list(args)) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent raise ChildFailedError( torch.distributed.elastic.multiprocessing.errors.ChildFailedError: ============================================================ ./pretrain_gpt_profile.py FAILED ------------------------------------------------------------ Failures: ------------------------------------------------------------ Root Cause (first observed failure): [0]: time : 2025-06-21_21:16:04 host : fs-mbz-gpu-881 rank : 21 (local_rank: 5) exitcode : 1 (pid: 3360387) error_file: traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html ============================================================ + set +x + set +x + set +x + set +x + for ctx_length in 1024 2048 4096 8192 12288 16384 24576 32768 40960 49152 65536 81920 98304 131072 + export PROF_CTX_LENGTH=65536 + PROF_CTX_LENGTH=65536 + name='/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L65536*tp8.cp4.bs32.json' + '[' -f '/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L65536*tp8.cp4.bs32.json' ']' + echo 'Running ctx_length=65536, TP_SIZE=8, CP_SIZE=4, BATCH_SIZE=32' + srun bash ./attnserver.sh + which python3 + which python3 + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 3 --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 65536 --max-position-embeddings 65536 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint 
--load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/ + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 2 --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 65536 --max-position-embeddings 65536 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/ + which python3 + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 1 --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 65536 --max-position-embeddings 65536 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/ + which python3 + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 0 --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 65536 --max-position-embeddings 65536 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated and will be removed in future. Use torchrun. Note that --use-env is set by default in torchrun. If your script expects `--local-rank` argument to be set, please change it to read from `os.environ['LOCAL_RANK']` instead. See https://pytorch.org/docs/stable/distributed.html#launch-utility for further instructions main() W0621 21:16:08.739000 142199 site-packages/torch/distributed/run.py:766] W0621 21:16:08.739000 142199 site-packages/torch/distributed/run.py:766] ***************************************** W0621 21:16:08.739000 142199 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. 
W0621 21:16:08.739000 142199 site-packages/torch/distributed/run.py:766] ***************************************** /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated and will be removed in future. Use torchrun. Note that --use-env is set by default in torchrun. If your script expects `--local-rank` argument to be set, please change it to read from `os.environ['LOCAL_RANK']` instead. See https://pytorch.org/docs/stable/distributed.html#launch-utility for further instructions main() /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated and will be removed in future. Use torchrun. Note that --use-env is set by default in torchrun. If your script expects `--local-rank` argument to be set, please change it to read from `os.environ['LOCAL_RANK']` instead. See https://pytorch.org/docs/stable/distributed.html#launch-utility for further instructions main() /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated and will be removed in future. Use torchrun. Note that --use-env is set by default in torchrun. If your script expects `--local-rank` argument to be set, please change it to read from `os.environ['LOCAL_RANK']` instead. See https://pytorch.org/docs/stable/distributed.html#launch-utility for further instructions main() W0621 21:16:08.846000 3430880 site-packages/torch/distributed/run.py:766] W0621 21:16:08.846000 3430880 site-packages/torch/distributed/run.py:766] ***************************************** W0621 21:16:08.846000 3430880 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. W0621 21:16:08.846000 3430880 site-packages/torch/distributed/run.py:766] ***************************************** W0621 21:16:08.847000 2061426 site-packages/torch/distributed/run.py:766] W0621 21:16:08.847000 2061426 site-packages/torch/distributed/run.py:766] ***************************************** W0621 21:16:08.847000 2061426 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. W0621 21:16:08.847000 2061426 site-packages/torch/distributed/run.py:766] ***************************************** W0621 21:16:08.846000 3362118 site-packages/torch/distributed/run.py:766] W0621 21:16:08.846000 3362118 site-packages/torch/distributed/run.py:766] ***************************************** W0621 21:16:08.846000 3362118 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. 
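The FutureWarning repeats on every node because the sweep still launches through python3 -m torch.distributed.launch; the same rendezvous arguments (--nnodes, --node_rank, --rdzv_backend, --rdzv_endpoint, ...) work unchanged with torchrun. The only script-side change the warning asks for is reading the local rank from the environment instead of a --local-rank argument; a minimal sketch, assuming the standard LOCAL_RANK variable exported by the launcher:

    import os
    import torch

    # torchrun (and torch.distributed.launch with --use-env) exports LOCAL_RANK per worker.
    local_rank = int(os.environ.get("LOCAL_RANK", "0"))
    torch.cuda.set_device(local_rank)  # bind this process to its GPU before init_process_group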
[rank0]:[W621 21:16:31.258112861 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 0] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
(All 32 ranks print the same ProcessGroupNCCL warning, each naming the local GPU index it is about to use.)
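The per-rank warning above is PyTorch noting that init_process_group() ran before the process had made its CUDA device explicit, so NCCL has to guess the rank-to-GPU mapping. A hedged sketch of the fix the message suggests (the device_id argument exists in recent PyTorch releases; adjust if this environment's version predates it):

    import os
    import torch
    import torch.distributed as dist

    local_rank = int(os.environ.get("LOCAL_RANK", "0"))
    device = torch.device(f"cuda:{local_rank}")
    torch.cuda.set_device(device)                               # make the mapping explicit first
    dist.init_process_group(backend="nccl", device_id=device)   # silences the PG warning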
warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. 
Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. 
warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. 
warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. 
warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn( /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. 
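Note on the ProcessGroupNCCL warning above: it is emitted because the process group is created before each rank has been bound to its GPU, so NCCL has to guess the rank-to-GPU mapping. A minimal sketch of the fix the warning itself suggests, assuming a torchrun-style launch that sets LOCAL_RANK (the device_id argument is accepted by init_process_group in recent PyTorch releases):

    import os
    import torch
    import torch.distributed as dist

    # Bind this process to its GPU first, then tell the NCCL process group which
    # device it owns; this removes the "device used by this process is currently
    # unknown" warning and avoids relying on an implicit rank -> GPU guess.
    local_rank = int(os.environ["LOCAL_RANK"])      # set by torchrun
    device = torch.device(f"cuda:{local_rank}")
    torch.cuda.set_device(device)
    dist.init_process_group(backend="nccl", device_id=device)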
[rank4]: Traceback (most recent call last):
[rank4]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
[rank4]:     pretrain(
[rank4]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain
[rank4]:     iteration, num_floating_point_operations_so_far = train(
[rank4]:                                                       ^^^^^^
[rank4]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train
[rank4]:     ) = train_step(
[rank4]:         ^^^^^^^^^^^
[rank4]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step
[rank4]:     losses_reduced = forward_backward_func(
[rank4]:                      ^^^^^^^^^^^^^^^^^^^^^^
[rank4]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining
[rank4]:     output_tensor, num_tokens = forward_step(
[rank4]:                                 ^^^^^^^^^^^^^
[rank4]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step
[rank4]:     output_tensor, loss_func = forward_step_func(data_iterator, model)
[rank4]:                                ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
[rank4]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step
[rank4]:     (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)
[rank4]:                                                                             ^^^^^^^^^^^^^^^^^^^^^^^^
[rank4]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch
[rank4]:     batch = next(global_batches)
[rank4]:             ^^^^^^^^^^^^^^^^^^^^
[rank4]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches
[rank4]:     attention_mask = torch.ones(
[rank4]:                      ^^^^^^^^^^^
[rank4]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 131072.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 134.17 GiB is free. Including non-PyTorch memory, this process has 5.63 GiB memory in use. Of the allocated memory 3.77 GiB is allocated by PyTorch, and 367.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)

Ranks 0, 1, 2, 3, 5, 6, 7, 13, 14, 15, 17, 20, 22, 27, 28, 29, and 31 fail with the identical traceback (pretrain -> train -> train_step -> forward_backward_no_pipelining -> forward_step -> get_batch -> setup_batches, at attention_mask = torch.ones on pretrain_gpt_profile.py:226), and every one of them tried to allocate 131072.00 GiB. Only the per-GPU numbers in their torch.OutOfMemoryError messages differ:

  rank  GPU  capacity     free         in use by process
  0     0    139.81 GiB   134.17 GiB   5.63 GiB
  1     1    139.81 GiB   134.16 GiB   5.65 GiB
  2     2    139.81 GiB   134.17 GiB   5.63 GiB
  3     3    139.81 GiB   134.16 GiB   5.65 GiB
  5     5    139.81 GiB   134.16 GiB   5.65 GiB
  6     6    139.81 GiB   134.17 GiB   5.63 GiB
  7     7    139.81 GiB   134.16 GiB   5.65 GiB
  13    5    139.81 GiB   134.15 GiB   5.65 GiB
  14    6    139.81 GiB   134.14 GiB   5.67 GiB
  15    7    139.81 GiB   134.15 GiB   5.65 GiB
  17    1    139.81 GiB   134.14 GiB   5.67 GiB
  20    4    139.81 GiB   134.15 GiB   5.65 GiB
  22    6    139.81 GiB   134.15 GiB   5.65 GiB
  27    3    139.81 GiB   134.17 GiB   5.63 GiB
  28    4    139.81 GiB   134.16 GiB   5.65 GiB
  29    5    139.81 GiB   134.17 GiB   5.63 GiB
  31    7    139.81 GiB   134.17 GiB   5.63 GiB

In each case 3.77 GiB is allocated by PyTorch and 367.51 MiB is reserved but unallocated, and the message ends with the same PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True suggestion and the link to https://pytorch.org/docs/stable/notes/cuda.html#environment-variables. Ranks 26, 18, and 10 are still printing the same traceback when the log ends.
forward_step_func(data_iterator, model) [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank18]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank10]: output_tensor, num_tokens = forward_step( [rank10]: ^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank10]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank10]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank26]: batch = next(global_batches) [rank26]: ^^^^^^^^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank26]: attention_mask = torch.ones( [rank26]: ^^^^^^^^^^^ [rank26]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 131072.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 134.16 GiB is free. Including non-PyTorch memory, this process has 5.65 GiB memory in use. Of the allocated memory 3.77 GiB is allocated by PyTorch, and 367.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank18]: batch = next(global_batches) [rank18]: ^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank18]: attention_mask = torch.ones( [rank18]: ^^^^^^^^^^^ [rank18]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 131072.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 134.15 GiB is free. Including non-PyTorch memory, this process has 5.65 GiB memory in use. Of the allocated memory 3.77 GiB is allocated by PyTorch, and 367.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank10]: batch = next(global_batches) [rank10]: ^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank10]: attention_mask = torch.ones( [rank10]: ^^^^^^^^^^^ [rank10]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 131072.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 134.14 GiB is free. 
Including non-PyTorch memory, this process has 5.67 GiB memory in use. Of the allocated memory 3.77 GiB is allocated by PyTorch, and 367.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank30]: Traceback (most recent call last): [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank30]: pretrain( [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank30]: iteration, num_floating_point_operations_so_far = train( [rank30]: ^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank30]: ) = train_step( [rank30]: ^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank30]: losses_reduced = forward_backward_func( [rank30]: ^^^^^^^^^^^^^^^^^^^^^^ [rank11]: Traceback (most recent call last): [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank11]: pretrain( [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank11]: iteration, num_floating_point_operations_so_far = train( [rank11]: ^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank11]: ) = train_step( [rank11]: ^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank11]: losses_reduced = forward_backward_func( [rank11]: ^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank30]: output_tensor, num_tokens = forward_step( [rank30]: ^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank11]: output_tensor, num_tokens = forward_step( [rank11]: ^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank11]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank11]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank30]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", 
line 284, in get_batch [rank30]: batch = next(global_batches) [rank30]: ^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank30]: attention_mask = torch.ones( [rank30]: ^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank11]: batch = next(global_batches) [rank11]: ^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank11]: attention_mask = torch.ones( [rank11]: ^^^^^^^^^^^ [rank11]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 131072.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 134.15 GiB is free. Including non-PyTorch memory, this process has 5.65 GiB memory in use. Of the allocated memory 3.77 GiB is allocated by PyTorch, and 367.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank30]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 131072.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 134.16 GiB is free. Including non-PyTorch memory, this process has 5.65 GiB memory in use. Of the allocated memory 3.77 GiB is allocated by PyTorch, and 367.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank19]: Traceback (most recent call last): [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank19]: pretrain( [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank19]: iteration, num_floating_point_operations_so_far = train( [rank19]: ^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank19]: ) = train_step( [rank19]: ^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank19]: losses_reduced = forward_backward_func( [rank19]: ^^^^^^^^^^^^^^^^^^^^^^ [rank12]: Traceback (most recent call last): [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank12]: pretrain( [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank12]: iteration, num_floating_point_operations_so_far = train( [rank12]: ^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank12]: ) = train_step( [rank12]: ^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank12]: losses_reduced = forward_backward_func( [rank12]: ^^^^^^^^^^^^^^^^^^^^^^ [rank25]: Traceback (most recent call last): [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank25]: pretrain( [rank25]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank25]: iteration, num_floating_point_operations_so_far = train( [rank25]: ^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank25]: ) = train_step( [rank25]: ^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank25]: losses_reduced = forward_backward_func( [rank25]: ^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank19]: output_tensor, num_tokens = forward_step( [rank19]: ^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank19]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank19]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank12]: output_tensor, num_tokens = forward_step( [rank12]: ^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank12]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank12]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank25]: output_tensor, num_tokens = forward_step( [rank25]: ^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank25]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank25]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank19]: batch = next(global_batches) [rank19]: ^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank19]: attention_mask = torch.ones( [rank19]: ^^^^^^^^^^^ [rank19]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 131072.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 134.14 GiB is free. Including non-PyTorch memory, this process has 5.67 GiB memory in use. 
Of the allocated memory 3.77 GiB is allocated by PyTorch, and 367.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank12]: batch = next(global_batches) [rank12]: ^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank12]: attention_mask = torch.ones( [rank12]: ^^^^^^^^^^^ [rank12]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 131072.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 134.14 GiB is free. Including non-PyTorch memory, this process has 5.67 GiB memory in use. Of the allocated memory 3.77 GiB is allocated by PyTorch, and 367.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank25]: batch = next(global_batches) [rank25]: ^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank25]: attention_mask = torch.ones( [rank25]: ^^^^^^^^^^^ [rank25]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 131072.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 134.17 GiB is free. Including non-PyTorch memory, this process has 5.63 GiB memory in use. Of the allocated memory 3.77 GiB is allocated by PyTorch, and 367.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank16]: Traceback (most recent call last): [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank16]: pretrain( [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank16]: iteration, num_floating_point_operations_so_far = train( [rank16]: ^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank16]: ) = train_step( [rank16]: ^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank16]: losses_reduced = forward_backward_func( [rank16]: ^^^^^^^^^^^^^^^^^^^^^^ [rank8]: Traceback (most recent call last): [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank8]: pretrain( [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank8]: iteration, num_floating_point_operations_so_far = train( [rank8]: ^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank8]: ) = train_step( [rank8]: ^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank8]: losses_reduced = forward_backward_func( [rank8]: ^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank24]: Traceback (most recent call last): [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank24]: pretrain( [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank24]: iteration, num_floating_point_operations_so_far = train( [rank24]: ^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank24]: ) = train_step( [rank24]: ^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank24]: losses_reduced = forward_backward_func( [rank24]: ^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank16]: output_tensor, num_tokens = forward_step( [rank16]: ^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank16]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank16]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: output_tensor, num_tokens = forward_step( [rank8]: ^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank8]: output_tensor, loss_func = 
forward_step_func(data_iterator, model) [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank8]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank8]: batch = next(global_batches) [rank8]: ^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank24]: output_tensor, num_tokens = forward_step( [rank24]: ^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank24]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank24]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank16]: batch = next(global_batches) [rank16]: ^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank16]: attention_mask = torch.ones( [rank16]: ^^^^^^^^^^^ [rank16]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 131072.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 134.15 GiB is free. Including non-PyTorch memory, this process has 5.65 GiB memory in use. Of the allocated memory 3.77 GiB is allocated by PyTorch, and 367.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank8]: attention_mask = torch.ones( [rank8]: ^^^^^^^^^^^ [rank8]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 131072.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 134.14 GiB is free. Including non-PyTorch memory, this process has 5.67 GiB memory in use. Of the allocated memory 3.77 GiB is allocated by PyTorch, and 367.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank24]: batch = next(global_batches) [rank24]: ^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank24]: attention_mask = torch.ones( [rank24]: ^^^^^^^^^^^ [rank24]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 131072.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 134.16 GiB is free. 
Including non-PyTorch memory, this process has 5.65 GiB memory in use. Of the allocated memory 3.77 GiB is allocated by PyTorch, and 367.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank9]: Traceback (most recent call last): [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank9]: pretrain( [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank9]: iteration, num_floating_point_operations_so_far = train( [rank9]: ^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank9]: ) = train_step( [rank9]: ^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank9]: losses_reduced = forward_backward_func( [rank9]: ^^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank9]: output_tensor, num_tokens = forward_step( [rank9]: ^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank9]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank9]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank9]: batch = next(global_batches) [rank9]: ^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank9]: attention_mask = torch.ones( [rank9]: ^^^^^^^^^^^ [rank9]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 131072.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 134.15 GiB is free. Including non-PyTorch memory, this process has 5.65 GiB memory in use. Of the allocated memory 3.77 GiB is allocated by PyTorch, and 367.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank21]: Traceback (most recent call last): [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank21]: pretrain( [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank21]: iteration, num_floating_point_operations_so_far = train( [rank21]: ^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank21]: ) = train_step( [rank21]: ^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank21]: losses_reduced = forward_backward_func( [rank21]: ^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank21]: output_tensor, num_tokens = forward_step( [rank21]: ^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank21]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank21]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank21]: batch = next(global_batches) [rank21]: ^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank21]: attention_mask = torch.ones( [rank21]: ^^^^^^^^^^^ [rank21]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 131072.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 134.14 GiB is free. Including non-PyTorch memory, this process has 5.67 GiB memory in use. Of the allocated memory 3.77 GiB is allocated by PyTorch, and 367.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank23]: Traceback (most recent call last): [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank23]: pretrain( [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank23]: iteration, num_floating_point_operations_so_far = train( [rank23]: ^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank23]: ) = train_step( [rank23]: ^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank23]: losses_reduced = forward_backward_func( [rank23]: ^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank23]: output_tensor, num_tokens = forward_step( [rank23]: ^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank23]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank23]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank23]: batch = next(global_batches) [rank23]: ^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank23]: attention_mask = torch.ones( [rank23]: ^^^^^^^^^^^ [rank23]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 131072.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 134.14 GiB is free. Including non-PyTorch memory, this process has 5.67 GiB memory in use. Of the allocated memory 3.77 GiB is allocated by PyTorch, and 367.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank1]:[W621 21:16:44.052850448 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank3]:[W621 21:16:44.060772146 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank17]:[W621 21:16:44.597770746 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. 
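
Every per-rank traceback above is the same single failure: `setup_batches` in `pretrain_gpt_profile.py` (line 226) materializes a dense attention mask with `torch.ones(...)`, and a dense `[micro_batch, 1, seq_len, seq_len]` mask grows quadratically with sequence length. The 131072 GiB request corresponds to 2^47 one-byte elements, so it is consistent with, for example, a boolean mask for a micro-batch of 8 at a 4,194,304-token context; the actual shape is not visible in this log, so the numbers below are illustrative only. Note that the `PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True` suggestion in the error text targets fragmentation and cannot help here: this is one contiguous request roughly 900x larger than the 139.81 GiB card.

```python
# Minimal sketch (hypothetical shapes): estimate the size of a dense
# [micro_batch, 1, seq_len, seq_len] attention mask before allocating it.
# The real shape used by pretrain_gpt_profile.py is not shown in this log.

def dense_mask_bytes(micro_batch: int, seq_len: int, bytes_per_elem: int = 1) -> int:
    """Bytes needed for torch.ones((micro_batch, 1, seq_len, seq_len), dtype=torch.bool)."""
    return micro_batch * seq_len * seq_len * bytes_per_elem

if __name__ == "__main__":
    GiB = 1024 ** 3
    # Example: micro-batch 8 at a 4M-token context reproduces the 131072 GiB figure.
    print(dense_mask_bytes(8, 4 * 1024 * 1024) / GiB)   # 131072.0
    # For comparison, the same micro-batch at a 32k context is ~8 GiB.
    print(dense_mask_bytes(8, 32 * 1024) / GiB)         # 8.0
```
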
For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank23]:[W621 21:16:44.666741587 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank19]:[W621 21:16:44.679766269 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank22]:[W621 21:16:44.685469405 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank15]:[W621 21:16:44.244463116 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank18]:[W621 21:16:44.710895581 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank26]:[W621 21:16:44.779714703 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank2]:[W621 21:16:44.375308682 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank21]:[W621 21:16:44.721609245 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank6]:[W621 21:16:44.385266690 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank12]:[W621 21:16:44.288800380 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank29]:[W621 21:16:44.810663629 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank10]:[W621 21:16:44.293387222 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank7]:[W621 21:16:44.416240766 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. 
For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank30]:[W621 21:16:44.834796046 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank25]:[W621 21:16:44.837826292 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank13]:[W621 21:16:44.319692201 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank31]:[W621 21:16:44.847130487 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank27]:[W621 21:16:44.849222530 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank9]:[W621 21:16:44.336744149 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank5]:[W621 21:16:44.459297976 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank20]:[W621 21:16:44.802488673 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank28]:[W621 21:16:44.870876958 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank4]:[W621 21:16:44.468699224 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank14]:[W621 21:16:44.363813207 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank11]:[W621 21:16:44.368181600 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. 
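
The `ProcessGroupNCCL.cpp:1476` warnings above are a symptom of the crash rather than a separate problem: each rank died inside the OOM exception before reaching any shutdown path, so `destroy_process_group()` was never called. A minimal sketch of an explicit teardown (assuming a conventional `main()` entrypoint, which may not match how `pretrain_gpt_profile.py` is actually structured) is shown below; it only silences the warning and does nothing about the OOM itself.

```python
# Sketch only: tear down the NCCL process group even when training raises,
# so the "destroy_process_group() was not called before program exit"
# warning does not fire on abnormal exits.
import torch.distributed as dist

def main():
    ...  # initialization and the call into pretrain(...) would go here

if __name__ == "__main__":
    try:
        main()
    finally:
        if dist.is_available() and dist.is_initialized():
            dist.destroy_process_group()
```
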
For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) W0621 21:16:45.181000 142199 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 142270 closing signal SIGTERM W0621 21:16:45.182000 142199 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 142272 closing signal SIGTERM W0621 21:16:45.183000 142199 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 142273 closing signal SIGTERM W0621 21:16:45.184000 142199 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 142274 closing signal SIGTERM W0621 21:16:45.184000 142199 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 142275 closing signal SIGTERM W0621 21:16:45.184000 142199 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 142277 closing signal SIGTERM W0621 21:16:45.304000 3362118 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3362188 closing signal SIGTERM W0621 21:16:45.307000 3362118 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3362189 closing signal SIGTERM W0621 21:16:45.307000 3362118 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3362190 closing signal SIGTERM W0621 21:16:45.308000 3362118 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3362191 closing signal SIGTERM W0621 21:16:45.308000 3362118 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3362192 closing signal SIGTERM W0621 21:16:45.308000 3362118 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3362194 closing signal SIGTERM W0621 21:16:45.309000 3362118 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3362195 closing signal SIGTERM W0621 21:16:45.406000 2061426 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2061496 closing signal SIGTERM W0621 21:16:45.408000 2061426 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2061497 closing signal SIGTERM W0621 21:16:45.409000 2061426 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2061498 closing signal SIGTERM W0621 21:16:45.410000 2061426 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2061499 closing signal SIGTERM W0621 21:16:45.410000 2061426 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2061500 closing signal SIGTERM W0621 21:16:45.410000 2061426 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2061502 closing signal SIGTERM W0621 21:16:45.411000 2061426 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2061503 closing signal SIGTERM W0621 21:16:45.458000 3430880 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3430950 closing signal SIGTERM W0621 21:16:45.461000 3430880 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3430951 closing signal SIGTERM W0621 21:16:45.461000 3430880 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3430953 closing signal SIGTERM W0621 21:16:45.462000 3430880 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3430954 
closing signal SIGTERM W0621 21:16:45.462000 3430880 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3430956 closing signal SIGTERM E0621 21:16:45.626000 142199 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 1 (pid: 142271) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 E0621 21:16:45.650000 3362118 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 5 (pid: 3362193) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 Traceback (most recent call last): File "", line 198, in _run_module_as_main File "", line 88, in _run_code File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in main() File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper return arg(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main launch(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch run(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run elastic_launch( File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__ return launch_agent(self._config, self._entrypoint, list(args)) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent raise ChildFailedError( torch.distributed.elastic.multiprocessing.errors.ChildFailedError: ============================================================ ./pretrain_gpt_profile.py FAILED ------------------------------------------------------------ Failures: [1]: time : 2025-06-21_21:16:45 host : fs-mbz-gpu-852 rank : 6 (local_rank: 6) exitcode : 1 (pid: 142276) error_file: traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html ------------------------------------------------------------ Root Cause (first observed failure): [0]: time : 2025-06-21_21:16:45 host : fs-mbz-gpu-852 rank : 1 (local_rank: 1) exitcode : 1 (pid: 142271) error_file: traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html ============================================================ Traceback (most recent call last): File "", line 198, in _run_module_as_main File "", line 88, in _run_code File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in main() File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper return arg(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main launch(args) File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch run(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run elastic_launch( File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__ return launch_agent(self._config, self._entrypoint, list(args)) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent raise ChildFailedError( torch.distributed.elastic.multiprocessing.errors.ChildFailedError: ============================================================ ./pretrain_gpt_profile.py FAILED ------------------------------------------------------------ Failures: ------------------------------------------------------------ Root Cause (first observed failure): [0]: time : 2025-06-21_21:16:45 host : fs-mbz-gpu-881 rank : 21 (local_rank: 5) exitcode : 1 (pid: 3362193) error_file: traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html ============================================================ E0621 21:16:45.702000 2061426 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 5 (pid: 2061501) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 [W621 21:16:45.151864621 TCPStore.cpp:115] [c10d] recvVector failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-901]:49078, remote=[fs-mbz-gpu-852]:29500): failed to recv, got 0 bytes Exception raised from recvBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:678 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x14772d1785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x14771605aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa0d0 (0x14771605c0d0 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5baa81d (0x14771605c81d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: + 0x5bab4a9 (0x14771605d4a9 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: c10d::TCPStore::compareSet(std::__cxx11::basic_string, std::allocator > const&, std::vector > const&, std::vector > const&) + 0x1fb (0x1477160574cb in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #6: + 0xc0f919 (0x14772538b919 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #7: + 0x37f17d (0x147724afb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #25: + 0x29d90 (0x14772e21dd90 in /lib/x86_64-linux-gnu/libc.so.6) frame #26: __libc_start_main + 0x80 (0x14772e21de40 in /lib/x86_64-linux-gnu/libc.so.6) W0621 
21:16:45.724000 2061426 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-901_2061426_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. [W621 21:16:45.164684014 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-901]:49078, remote=[fs-mbz-gpu-852]:29500): Broken pipe Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x14772d1785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x14771605aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa358 (0x14771605c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5babb3e (0x14771605db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: c10d::TCPStore::compareSet(std::__cxx11::basic_string, std::allocator > const&, std::vector > const&, std::vector > const&) + 0x299 (0x147716057569 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: + 0xc0f919 (0x14772538b919 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #6: + 0x37f17d (0x147724afb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #24: + 0x29d90 (0x14772e21dd90 in /lib/x86_64-linux-gnu/libc.so.6) frame #25: __libc_start_main + 0x80 (0x14772e21de40 in /lib/x86_64-linux-gnu/libc.so.6) W0621 21:16:45.735000 2061426 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-901_2061426_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. 
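
The `recvVector failed` / `Broken pipe` messages and the repeated `RendezvousConnectionError` warnings here are most likely downstream effects: the rendezvous TCPStore is served from fs-mbz-gpu-852:29500, and once the agent on that host exits after its workers fail, the agents on the other nodes can no longer reach it while shutting the rendezvous down. The actionable information is in the ChildFailedError summaries, whose `error_file` and `traceback` fields are empty. torchelastic fills those fields when the worker entrypoint is wrapped with its `record` decorator; a sketch follows (whether this script already does so is not visible here).

```python
# Sketch: decorate the worker entrypoint so torchelastic writes the Python
# traceback of a failed rank into error_file and echoes it in the
# ChildFailedError summary instead of leaving those fields blank.
from torch.distributed.elastic.multiprocessing.errors import record

@record
def main():
    ...  # argument parsing and the call into pretrain(...) would go here

if __name__ == "__main__":
    main()
```
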
[the sendBytes "Broken pipe" failure and the rendezvous shutdown warning above are repeated once more for node fs-mbz-gpu-901 (agent 2061426, W0621 21:16:45.745000)]
Traceback (most recent call last): File "", line 198, in _run_module_as_main File "", line 88, in _run_code File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in main() File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper return arg(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main launch(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch run(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run elastic_launch( File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__ return launch_agent(self._config, self._entrypoint, list(args)) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent raise ChildFailedError( torch.distributed.elastic.multiprocessing.errors.ChildFailedError: ============================================================ ./pretrain_gpt_profile.py FAILED ------------------------------------------------------------ Failures: ------------------------------------------------------------ Root Cause (first observed failure): [0]: time : 2025-06-21_21:16:45 host : fs-mbz-gpu-901 rank : 29 (local_rank: 5) exitcode : 1 (pid: 2061501) error_file: traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html ============================================================ E0621 21:16:45.753000 3430880 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 2 (pid: 3430952) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 [W621 21:16:45.682464524 TCPStore.cpp:115] [c10d] recvVector failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-870]:33226, remote=[fs-mbz-gpu-852]:29500): failed to recv, got 0 bytes Exception raised from recvBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:678 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x14bb18d785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x14bb0205aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa0d0 (0x14bb0205c0d0 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5baa81d (0x14bb0205c81d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: + 0x5bab4a9 (0x14bb0205d4a9 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: c10d::TCPStore::compareSet(std::__cxx11::basic_string, std::allocator > const&, std::vector > const&, std::vector > const&) + 0x1fb (0x14bb020574cb in 
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #6: + 0xc0f919 (0x14bb1138b919 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #7: + 0x37f17d (0x14bb10afb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #25: + 0x29d90 (0x14bb1a02dd90 in /lib/x86_64-linux-gnu/libc.so.6) frame #26: __libc_start_main + 0x80 (0x14bb1a02de40 in /lib/x86_64-linux-gnu/libc.so.6) W0621 21:16:45.776000 3430880 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-870_3430880_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. [W621 21:16:45.695070229 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-870]:33226, remote=[fs-mbz-gpu-852]:29500): Broken pipe Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x14bb18d785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x14bb0205aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa358 (0x14bb0205c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5babb3e (0x14bb0205db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: c10d::TCPStore::compareSet(std::__cxx11::basic_string, std::allocator > const&, std::vector > const&, std::vector > const&) + 0x299 (0x14bb02057569 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: + 0xc0f919 (0x14bb1138b919 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #6: + 0x37f17d (0x14bb10afb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #24: + 0x29d90 (0x14bb1a02dd90 in /lib/x86_64-linux-gnu/libc.so.6) frame #25: __libc_start_main + 0x80 (0x14bb1a02de40 in /lib/x86_64-linux-gnu/libc.so.6) W0621 21:16:45.786000 3430880 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-870_3430880_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. 
[the sendBytes "Broken pipe" failure and the rendezvous shutdown warning above are repeated once more for node fs-mbz-gpu-870 (agent 3430880, W0621 21:16:45.795000)]
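The recvVector/sendBytes failures and the repeated "failed to shutdown the rendezvous" warnings above are secondary symptoms: the c10d store backing rendezvous '343200' lives at the --rdzv_endpoint fs-mbz-gpu-852:29500, and once that endpoint goes away after the child failures, the surviving agents can no longer reach it to shut the rendezvous down cleanly. A quick client-side probe can confirm whether the store is still listening; the snippet below is only a sketch (host and port copied from the log, the probe script is not part of the original job):

# tcpstore_probe.py - check whether the c10d rendezvous store is still reachable.
from datetime import timedelta

import torch.distributed as dist

HOST, PORT = "fs-mbz-gpu-852", 29500  # --rdzv_endpoint from the launch command above

try:
    # Connect as a client (is_master=False); this raises if nothing is listening.
    store = dist.TCPStore(HOST, PORT, is_master=False, timeout=timedelta(seconds=5))
    store.set("probe", "ok")  # round-trip a key to confirm the store answers
    print("TCPStore reachable:", store.get("probe"))
except Exception as exc:  # e.g. connection refused once the hosting agent has exited
    print(f"TCPStore at {HOST}:{PORT} unreachable: {exc}")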
Traceback (most recent call last): File "", line 198, in _run_module_as_main File "", line 88, in _run_code File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in main() File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper return arg(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main launch(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch run(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run elastic_launch( File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__ return launch_agent(self._config, self._entrypoint, list(args)) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent raise ChildFailedError( torch.distributed.elastic.multiprocessing.errors.ChildFailedError: ============================================================ ./pretrain_gpt_profile.py FAILED ------------------------------------------------------------ Failures: [1]: time : 2025-06-21_21:16:45 host : fs-mbz-gpu-870 rank : 13 (local_rank: 5) exitcode : 1 (pid: 3430955) error_file: traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html [2]: time : 2025-06-21_21:16:45 host : fs-mbz-gpu-870 rank : 15 (local_rank: 7) exitcode : 1 (pid: 3430957) error_file: traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html ------------------------------------------------------------ Root Cause (first observed failure): [0]: time : 2025-06-21_21:16:45 host : fs-mbz-gpu-870 rank : 10 (local_rank: 2) exitcode : 1 (pid: 3430952) error_file: traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html ============================================================ + set +x + set +x + set +x + set +x + for ctx_length in 1024 2048 4096 8192 12288 16384 24576 32768 40960 49152 65536 81920 98304 131072 + export PROF_CTX_LENGTH=81920 + PROF_CTX_LENGTH=81920 + name='/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L81920*tp8.cp4.bs32.json' + '[' -f '/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L81920*tp8.cp4.bs32.json' ']' + echo 'Running ctx_length=81920, TP_SIZE=8, CP_SIZE=4, BATCH_SIZE=32' + srun bash ./attnserver.sh + which python3 + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 3 --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 81920 --max-position-embeddings 81920 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 
--eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/ + which python3 [the same python3 -m torch.distributed.launch command is then issued on the other three nodes with --node_rank 1, --node_rank 0 and --node_rank 2; all other arguments are identical] /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated and will be removed in future. Use torchrun. Note that --use-env is set by default in torchrun. If your script expects `--local-rank` argument to be set, please change it to read from `os.environ['LOCAL_RANK']` instead.
See https://pytorch.org/docs/stable/distributed.html#launch-utility for further instructions main() W0621 21:16:49.714000 2063251 site-packages/torch/distributed/run.py:766] ***************************************** W0621 21:16:49.714000 2063251 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. W0621 21:16:49.714000 2063251 site-packages/torch/distributed/run.py:766] ***************************************** [the same FutureWarning and OMP_NUM_THREADS notice are printed by the launcher agents on the other three nodes (pids 3432705, 144078 and 3363945)]
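The FutureWarning repeated by each agent above is emitted because the job still launches through python3 -m torch.distributed.launch; the recommended replacement is torchrun, which takes the same --nproc_per_node/--nnodes/--rdzv_* arguments and always exports the rank information as environment variables instead of passing --local-rank. On the script side, the change the warning asks for looks roughly like the sketch below (illustrative only, not the actual code in pretrain_gpt_profile.py):

import os

import torch

# torchrun (and torch.distributed.launch with --use-env) exports these per worker:
local_rank = int(os.environ["LOCAL_RANK"])      # 0..7 here, since --nproc_per_node 8
rank = int(os.environ.get("RANK", "0"))         # global rank, 0..31 for 4 nodes x 8 GPUs
world_size = int(os.environ.get("WORLD_SIZE", "1"))

# Bind this process to its GPU before any CUDA/NCCL work.
torch.cuda.set_device(local_rank)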
[rank8]:[W621 21:17:12.671573750 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 8] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can specify device_id in init_process_group() to force use of a particular device.
[the same ProcessGroupNCCL device-mapping warning is printed once for each of the 32 ranks (local GPUs 0-7 on each of the four nodes)]
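This warning appears once per rank because, at init_process_group time, NCCL does not yet know which GPU the rank will use. As the message says, the fix is to pin the device explicitly, either by calling torch.cuda.set_device first or by passing device_id (available in recent PyTorch releases). A sketch under the usual one-process-per-GPU layout (variable names are illustrative, not taken from the training script):

import os
from datetime import timedelta

import torch
import torch.distributed as dist

local_rank = int(os.environ["LOCAL_RANK"])
device = torch.device(f"cuda:{local_rank}")
torch.cuda.set_device(device)  # make the rank -> GPU mapping explicit up front

dist.init_process_group(
    backend="nccl",
    timeout=timedelta(minutes=10),
    device_id=device,  # tells ProcessGroupNCCL which GPU this rank owns, silencing the warning
)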
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn(
[the same gpt_layer_specs.py UserWarning is emitted once per rank]
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. warnings.warn(
[the same transformer_engine cpu_offload.py DeprecationWarning is emitted once per rank]
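Both deprecation notices above are emitted once per rank, so across 32 ranks they account for a large share of the log. The real fix is to stop passing the deprecated fp8 and offload_weights arguments at the call sites; until then, the noise can be reduced with standard warning filters. The snippet below is a sketch (message patterns abbreviated from the log; keeping the warnings only on rank 0 is just one possible policy):

import os
import warnings

# Show the known deprecation notices only on global rank 0; drop them elsewhere
# so the aggregated multi-node log is not flooded with 32 copies of each message.
action = "default" if os.environ.get("RANK", "0") == "0" else "ignore"

warnings.filterwarnings(
    action,
    message=r'The fp8 argument in "get_gpt_layer_with_transformer_engine_spec"',
    category=UserWarning,
)
warnings.filterwarnings(
    action,
    message=r"Offloading weights is deprecated",
    category=DeprecationWarning,
)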
[rank2]: Traceback (most recent call last):
[rank2]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
[rank2]:     pretrain(
[rank2]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain
[rank2]:     iteration, num_floating_point_operations_so_far = train(
[rank2]:                                                       ^^^^^^
[rank2]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train
[rank2]:     ) = train_step(
[rank2]:         ^^^^^^^^^^^
[rank2]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step
[rank2]:     losses_reduced = forward_backward_func(
[rank2]:                      ^^^^^^^^^^^^^^^^^^^^^^
[rank2]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining
[rank2]:     output_tensor, num_tokens = forward_step(
[rank2]:                                 ^^^^^^^^^^^^^
[rank2]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step
[rank2]:     output_tensor, loss_func = forward_step_func(data_iterator, model)
[rank2]:                                ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
[rank2]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step
[rank2]:     (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)
[rank2]:                                                                             ^^^^^^^^^^^^^^^^^^^^^^^^
[rank2]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch
[rank2]:     batch = next(global_batches)
[rank2]:             ^^^^^^^^^^^^^^^^^^^^
[rank2]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches
[rank2]:     attention_mask = torch.ones(
[rank2]:                      ^^^^^^^^^^^
[rank2]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 204800.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 133.17 GiB is free. Including non-PyTorch memory, this process has 6.63 GiB memory in use. Of the allocated memory 4.59 GiB is allocated by PyTorch, and 559.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation.
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank6]: Traceback (most recent call last): [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank6]: pretrain( [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank6]: iteration, num_floating_point_operations_so_far = train( [rank6]: ^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank6]: ) = train_step( [rank6]: ^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank6]: losses_reduced = forward_backward_func( [rank6]: ^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank6]: output_tensor, num_tokens = forward_step( [rank6]: ^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank6]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank6]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank6]: batch = next(global_batches) [rank6]: ^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank6]: attention_mask = torch.ones( [rank6]: ^^^^^^^^^^^ [rank6]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 204800.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 133.17 GiB is free. Including non-PyTorch memory, this process has 6.63 GiB memory in use. Of the allocated memory 4.59 GiB is allocated by PyTorch, and 559.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank3]: Traceback (most recent call last): [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank3]: pretrain( [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank3]: iteration, num_floating_point_operations_so_far = train( [rank3]: ^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank3]: ) = train_step( [rank3]: ^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank3]: losses_reduced = forward_backward_func( [rank3]: ^^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank3]: output_tensor, num_tokens = forward_step( [rank3]: ^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank3]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank3]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank3]: batch = next(global_batches) [rank3]: ^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank3]: attention_mask = torch.ones( [rank3]: ^^^^^^^^^^^ [rank3]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 204800.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 133.16 GiB is free. Including non-PyTorch memory, this process has 6.65 GiB memory in use. Of the allocated memory 4.59 GiB is allocated by PyTorch, and 559.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
Ranks 0, 5, and 7 fail at the same point with the identical torch.OutOfMemoryError: each tries to allocate 204800.00 GiB for the attention_mask = torch.ones( call at pretrain_gpt_profile.py line 226, on GPUs 0, 5, and 7 with 139.81 GiB of total capacity (133.16-133.17 GiB free, 6.63-6.65 GiB in use per process, of which 4.59 GiB is allocated by PyTorch and 559.51 MiB is reserved but unallocated). Each message closes with the same PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True suggestion and the Memory Management documentation link.
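Each of these messages ends with the same allocator hint. Note that PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True only mitigates fragmentation of memory PyTorch has already reserved; it cannot help when a single requested allocation (204800.00 GiB) exceeds the card's entire 139.81 GiB capacity by more than three orders of magnitude. For completeness, a minimal sketch of how the hint would be applied, assuming it is put in place before the process first touches CUDA:

    import os

    # The CUDA caching allocator reads this variable when it initializes, so it
    # must be in the environment before the first CUDA allocation in the process
    # (equivalently, export it in the job script before launching torchrun).
    os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "expandable_segments:True")

    import torch  # imported afterwards so the setting is visible to the allocator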
Interleaved in the same burst of output, ranks 1, 4, 8-13, 15, 17-27, and 29-31 report the identical traceback and the identical torch.OutOfMemoryError: 204800.00 GiB requested by the torch.ones call in setup_batches (pretrain_gpt_profile.py, line 226), reached through pretrain -> train -> train_step -> forward_backward_no_pipelining -> forward_step -> get_batch. Only the per-rank figures differ: local GPUs 0-7, 133.14-133.17 GiB free of the 139.81 GiB total, 6.63-6.67 GiB in use per process, 4.59 GiB allocated by PyTorch and 559.51 MiB reserved but unallocated, each followed by the same expandable_segments hint and documentation link. Ranks 14, 16, and 28 begin the same traceback; rank 28's has reached the get_batch frame:
[rank28]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch
[rank28]:     batch = next(global_batches)
[rank28]:   File
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank28]: attention_mask = torch.ones( [rank28]: ^^^^^^^^^^^ [rank28]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 204800.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 133.16 GiB is free. Including non-PyTorch memory, this process has 6.65 GiB memory in use. Of the allocated memory 4.59 GiB is allocated by PyTorch, and 559.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank14]: output_tensor, num_tokens = forward_step( [rank14]: ^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank14]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank14]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank16]: output_tensor, num_tokens = forward_step( [rank16]: ^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank16]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank16]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank14]: batch = next(global_batches) [rank14]: ^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank14]: attention_mask = torch.ones( [rank14]: ^^^^^^^^^^^ [rank14]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 204800.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 133.14 GiB is free. Including non-PyTorch memory, this process has 6.67 GiB memory in use. Of the allocated memory 4.59 GiB is allocated by PyTorch, and 559.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank16]: batch = next(global_batches) [rank16]: ^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank16]: attention_mask = torch.ones( [rank16]: ^^^^^^^^^^^ [rank16]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 204800.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 133.15 GiB is free. Including non-PyTorch memory, this process has 6.65 GiB memory in use. Of the allocated memory 4.59 GiB is allocated by PyTorch, and 559.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank1]:[W621 21:17:26.308497202 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank5]:[W621 21:17:26.319590417 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank6]:[W621 21:17:26.356352446 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank17]:[W621 21:17:26.755520221 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank3]:[W621 21:17:26.422657896 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank7]:[W621 21:17:26.422746601 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank12]:[W621 21:17:26.338910931 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank23]:[W621 21:17:26.796052542 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank21]:[W621 21:17:26.801418987 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. 
For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank2]:[W621 21:17:26.462971685 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank13]:[W621 21:17:26.358959965 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank18]:[W621 21:17:26.818406212 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank10]:[W621 21:17:26.371863870 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank19]:[W621 21:17:26.844756175 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank31]:[W621 21:17:26.915292961 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank9]:[W621 21:17:26.414031367 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank29]:[W621 21:17:26.950911551 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank15]:[W621 21:17:26.444343158 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank26]:[W621 21:17:26.974488728 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank28]:[W621 21:17:26.991801632 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank27]:[W621 21:17:26.994382504 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank25]:[W621 21:17:26.996195647 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. 
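The wall of `destroy_process_group() was not called before program exit` warnings is a symptom rather than a cause: the OOM exceptions unwind past the point where the training script would normally tear down the NCCL process group. A hedged sketch of the usual guard, assuming a typical `torch.distributed` entry point rather than the actual structure of `pretrain_gpt_profile.py`:

```python
import torch.distributed as dist

def main() -> None:
    dist.init_process_group(backend="nccl")
    try:
        ...  # training loop
    finally:
        # Release NCCL resources even when training raises, which silences
        # the ProcessGroupNCCL shutdown warning seen above.
        if dist.is_initialized():
            dist.destroy_process_group()

if __name__ == "__main__":
    main()
```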
For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank30]:[W621 21:17:26.024428679 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank20]:[W621 21:17:26.963688053 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank4]:[W621 21:17:26.643183212 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank22]:[W621 21:17:26.996321535 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank11]:[W621 21:17:26.573747096 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank14]:[W621 21:17:26.614147238 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) W0621 21:17:27.461000 144078 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 144151 closing signal SIGTERM W0621 21:17:27.464000 144078 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 144153 closing signal SIGTERM W0621 21:17:27.465000 144078 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 144154 closing signal SIGTERM W0621 21:17:27.465000 144078 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 144155 closing signal SIGTERM W0621 21:17:27.466000 144078 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 144156 closing signal SIGTERM W0621 21:17:27.466000 144078 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 144157 closing signal SIGTERM W0621 21:17:27.466000 144078 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 144158 closing signal SIGTERM W0621 21:17:27.490000 3432705 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3432775 closing signal SIGTERM W0621 21:17:27.491000 2063251 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2063322 closing signal SIGTERM W0621 21:17:27.493000 2063251 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2063323 closing signal SIGTERM W0621 21:17:27.493000 3432705 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3432776 closing signal SIGTERM W0621 21:17:27.494000 2063251 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2063324 closing signal SIGTERM W0621 21:17:27.494000 3432705 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] 
Sending process 3432778 closing signal SIGTERM W0621 21:17:27.495000 2063251 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2063325 closing signal SIGTERM W0621 21:17:27.494000 3432705 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3432779 closing signal SIGTERM W0621 21:17:27.495000 2063251 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2063326 closing signal SIGTERM W0621 21:17:27.495000 2063251 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2063328 closing signal SIGTERM W0621 21:17:27.495000 3432705 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3432781 closing signal SIGTERM W0621 21:17:27.495000 2063251 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2063329 closing signal SIGTERM W0621 21:17:27.495000 3432705 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3432782 closing signal SIGTERM W0621 21:17:27.557000 3363945 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3364015 closing signal SIGTERM W0621 21:17:27.559000 3363945 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3364018 closing signal SIGTERM W0621 21:17:27.560000 3363945 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3364019 closing signal SIGTERM W0621 21:17:27.560000 3363945 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3364020 closing signal SIGTERM W0621 21:17:27.561000 3363945 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3364021 closing signal SIGTERM W0621 21:17:27.561000 3363945 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3364022 closing signal SIGTERM W0621 21:17:27.562000 3363945 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3364023 closing signal SIGTERM E0621 21:17:27.794000 144078 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 1 (pid: 144152) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 E0621 21:17:27.823000 3432705 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 2 (pid: 3432777) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 Traceback (most recent call last): File "", line 198, in _run_module_as_main File "", line 88, in _run_code File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in main() File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper return arg(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main launch(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch run(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run elastic_launch( File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__ return launch_agent(self._config, self._entrypoint, list(args)) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent raise ChildFailedError( torch.distributed.elastic.multiprocessing.errors.ChildFailedError: ============================================================ ./pretrain_gpt_profile.py FAILED ------------------------------------------------------------ Failures: ------------------------------------------------------------ Root Cause (first observed failure): [0]: time : 2025-06-21_21:17:27 host : fs-mbz-gpu-852 rank : 1 (local_rank: 1) exitcode : 1 (pid: 144152) error_file: traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html ============================================================ Traceback (most recent call last): File "", line 198, in _run_module_as_main File "", line 88, in _run_code File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in main() File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper return arg(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main launch(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch run(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run elastic_launch( File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__ return launch_agent(self._config, self._entrypoint, list(args)) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent raise ChildFailedError( torch.distributed.elastic.multiprocessing.errors.ChildFailedError: ============================================================ ./pretrain_gpt_profile.py FAILED ------------------------------------------------------------ Failures: [1]: time : 2025-06-21_21:17:27 host : fs-mbz-gpu-870 rank : 13 (local_rank: 5) exitcode : 1 (pid: 3432780) error_file: traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html ------------------------------------------------------------ Root Cause (first observed failure): [0]: time : 2025-06-21_21:17:27 host : fs-mbz-gpu-870 rank : 10 (local_rank: 2) exitcode : 1 (pid: 3432777) error_file: traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html ============================================================ E0621 21:17:27.973000 2063251 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 5 (pid: 2063327) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 W0621 21:17:27.986000 2063251 
site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-901_2063251_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. [W621 21:17:27.426002538 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=4, addr=[fs-mbz-gpu-901]:52104, remote=[fs-mbz-gpu-852]:29500): Broken pipe Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x154228f785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x15421225aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa358 (0x15421225c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5babb3e (0x15421225db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: c10d::TCPStore::doWait(c10::ArrayRef, std::allocator > >, std::chrono::duration >) + 0x1a6 (0x154212257ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string, std::allocator > const&) + 0x33 (0x154212257ea3 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #6: c10d::TCPStore::get(std::__cxx11::basic_string, std::allocator > const&) + 0xab (0x154212258f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #7: + 0xc0f526 (0x15422158b526 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #8: + 0x37f17d (0x154220cfb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #26: + 0x29d90 (0x15422a27ad90 in /lib/x86_64-linux-gnu/libc.so.6) frame #27: __libc_start_main + 0x80 (0x15422a27ae40 in /lib/x86_64-linux-gnu/libc.so.6) W0621 21:17:27.999000 2063251 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-901_2063251_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. 
[W621 21:17:27.437861501 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=4, addr=[fs-mbz-gpu-901]:52104, remote=[fs-mbz-gpu-852]:29500): Broken pipe Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x154228f785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x15421225aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa358 (0x15421225c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5babb3e (0x15421225db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: c10d::TCPStore::doWait(c10::ArrayRef, std::allocator > >, std::chrono::duration >) + 0x1a6 (0x154212257ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string, std::allocator > const&) + 0x33 (0x154212257ea3 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #6: c10d::TCPStore::get(std::__cxx11::basic_string, std::allocator > const&) + 0xab (0x154212258f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #7: + 0xc0f526 (0x15422158b526 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #8: + 0x37f17d (0x154220cfb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #26: + 0x29d90 (0x15422a27ad90 in /lib/x86_64-linux-gnu/libc.so.6) frame #27: __libc_start_main + 0x80 (0x15422a27ae40 in /lib/x86_64-linux-gnu/libc.so.6) W0621 21:17:28.009000 2063251 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-901_2063251_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. 
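The `Broken pipe` and `RendezvousConnectionError` messages come from the elastic agents on fs-mbz-gpu-901 and fs-mbz-gpu-881 trying to update the c10d rendezvous after the agent hosting the store (fs-mbz-gpu-852:29500) has already shut it down; they are follow-on noise from the rank failures above, not an independent network fault. For debugging reachability of the rendezvous endpoint, a small client-side probe can be written against the same `TCPStore`; a sketch, with host and port taken from the `--rdzv_endpoint` used in this log:

```python
from datetime import timedelta
import torch.distributed as dist

# Values from the launch command in this log (--rdzv_endpoint fs-mbz-gpu-852:29500).
HOST, PORT = "fs-mbz-gpu-852", 29500

try:
    # is_master=False: connect as a client; fails fast if the store is gone.
    store = dist.TCPStore(HOST, PORT, is_master=False, timeout=timedelta(seconds=5))
    print("rendezvous store reachable")
except Exception as exc:  # broken pipe / connection refused once the host exits
    print(f"rendezvous store unreachable: {exc}")
```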
Traceback (most recent call last): File "", line 198, in _run_module_as_main File "", line 88, in _run_code File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in main() File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper return arg(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main launch(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch run(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run elastic_launch( File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__ return launch_agent(self._config, self._entrypoint, list(args)) E0621 21:17:28.016000 3363945 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 1 (pid: 3364016) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent raise ChildFailedError( torch.distributed.elastic.multiprocessing.errors.ChildFailedError: ============================================================ ./pretrain_gpt_profile.py FAILED ------------------------------------------------------------ Failures: ------------------------------------------------------------ Root Cause (first observed failure): [0]: time : 2025-06-21_21:17:27 host : fs-mbz-gpu-901 rank : 29 (local_rank: 5) exitcode : 1 (pid: 2063327) error_file: traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html ============================================================ [W621 21:17:28.397907967 TCPStore.cpp:115] [c10d] recvVector failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-881]:53752, remote=[fs-mbz-gpu-852]:29500): failed to recv, got 0 bytes Exception raised from recvBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:678 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x14f0859785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x14f06e85aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa0d0 (0x14f06e85c0d0 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5baa81d (0x14f06e85c81d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: + 0x5bab4a9 (0x14f06e85d4a9 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: c10d::TCPStore::compareSet(std::__cxx11::basic_string, std::allocator > const&, std::vector > const&, std::vector > const&) + 0x1fb (0x14f06e8574cb in 
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #6: + 0xc0f919 (0x14f07db8b919 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #7: + 0x37f17d (0x14f07d2fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #25: + 0x29d90 (0x14f086962d90 in /lib/x86_64-linux-gnu/libc.so.6) frame #26: __libc_start_main + 0x80 (0x14f086962e40 in /lib/x86_64-linux-gnu/libc.so.6) W0621 21:17:28.038000 3363945 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-881_3363945_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. [W621 21:17:28.409394959 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-881]:53752, remote=[fs-mbz-gpu-852]:29500): Broken pipe Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x14f0859785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x14f06e85aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa358 (0x14f06e85c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5babb3e (0x14f06e85db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: c10d::TCPStore::compareSet(std::__cxx11::basic_string, std::allocator > const&, std::vector > const&, std::vector > const&) + 0x299 (0x14f06e857569 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: + 0xc0f919 (0x14f07db8b919 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #6: + 0x37f17d (0x14f07d2fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #24: + 0x29d90 (0x14f086962d90 in /lib/x86_64-linux-gnu/libc.so.6) frame #25: __libc_start_main + 0x80 (0x14f086962e40 in /lib/x86_64-linux-gnu/libc.so.6) W0621 21:17:28.047000 3363945 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-881_3363945_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. 
[W621 21:17:28.418249679 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-881]:53752, remote=[fs-mbz-gpu-852]:29500): Broken pipe Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first): frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x14f0859785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so) frame #1: + 0x5ba8afe (0x14f06e85aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #2: + 0x5baa358 (0x14f06e85c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #3: + 0x5babb3e (0x14f06e85db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #4: c10d::TCPStore::compareSet(std::__cxx11::basic_string, std::allocator > const&, std::vector > const&, std::vector > const&) + 0x299 (0x14f06e857569 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so) frame #5: + 0xc0f919 (0x14f07db8b919 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #6: + 0x37f17d (0x14f07d2fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so) frame #24: + 0x29d90 (0x14f086962d90 in /lib/x86_64-linux-gnu/libc.so.6) frame #25: __libc_start_main + 0x80 (0x14f086962e40 in /lib/x86_64-linux-gnu/libc.so.6) W0621 21:17:28.056000 3363945 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-881_3363945_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError. 
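Each `ChildFailedError` summary prints an empty `error_file` and points to https://pytorch.org/docs/stable/elastic/errors.html for enabling tracebacks. The documented way to get the child's Python traceback into that report is to wrap the entry point with the elastic `record` decorator; a sketch, not the actual main block of `pretrain_gpt_profile.py`:

```python
from torch.distributed.elastic.multiprocessing.errors import record

@record
def main() -> None:
    # With @record, an uncaught exception raised here is serialized into the
    # error file that torchrun / torch.distributed.launch prints in its
    # failure summary, instead of the empty "error_file:" seen above.
    raise RuntimeError("demo failure to populate the elastic error file")

if __name__ == "__main__":
    main()
```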
Traceback (most recent call last): File "", line 198, in _run_module_as_main File "", line 88, in _run_code File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in main() File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper return arg(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main launch(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch run(args) File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run elastic_launch( File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__ return launch_agent(self._config, self._entrypoint, list(args)) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent raise ChildFailedError( torch.distributed.elastic.multiprocessing.errors.ChildFailedError: ============================================================ ./pretrain_gpt_profile.py FAILED ------------------------------------------------------------ Failures: ------------------------------------------------------------ Root Cause (first observed failure): [0]: time : 2025-06-21_21:17:27 host : fs-mbz-gpu-881 rank : 17 (local_rank: 1) exitcode : 1 (pid: 3364016) error_file: traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html ============================================================ + set +x + set +x + set +x + set +x + for ctx_length in 1024 2048 4096 8192 12288 16384 24576 32768 40960 49152 65536 81920 98304 131072 + export PROF_CTX_LENGTH=98304 + PROF_CTX_LENGTH=98304 + name='/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L98304*tp8.cp4.bs32.json' + '[' -f '/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L98304*tp8.cp4.bs32.json' ']' + echo 'Running ctx_length=98304, TP_SIZE=8, CP_SIZE=4, BATCH_SIZE=32' + srun bash ./attnserver.sh + which python3 + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 3 --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 98304 --max-position-embeddings 98304 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/ + which python3 + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 0 --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 
--context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 98304 --max-position-embeddings 98304 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/ + which python3 + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 2 --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 98304 --max-position-embeddings 98304 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/ + which python3 + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 1 --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 98304 --max-position-embeddings 98304 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated and will be removed in future. Use torchrun. Note that --use-env is set by default in torchrun. If your script expects `--local-rank` argument to be set, please change it to read from `os.environ['LOCAL_RANK']` instead. See https://pytorch.org/docs/stable/distributed.html#launch-utility for further instructions main() W0621 21:17:31.986000 145950 site-packages/torch/distributed/run.py:766] W0621 21:17:31.986000 145950 site-packages/torch/distributed/run.py:766] ***************************************** W0621 21:17:31.986000 145950 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. W0621 21:17:31.986000 145950 site-packages/torch/distributed/run.py:766] ***************************************** /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated and will be removed in future. 
Use torchrun. Note that --use-env is set by default in torchrun. If your script expects `--local-rank` argument to be set, please change it to read from `os.environ['LOCAL_RANK']` instead. See https://pytorch.org/docs/stable/distributed.html#launch-utility for further instructions main() W0621 21:17:31.989000 2065087 site-packages/torch/distributed/run.py:766] W0621 21:17:31.989000 2065087 site-packages/torch/distributed/run.py:766] ***************************************** W0621 21:17:31.989000 2065087 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. W0621 21:17:31.989000 2065087 site-packages/torch/distributed/run.py:766] ***************************************** /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated and will be removed in future. Use torchrun. Note that --use-env is set by default in torchrun. If your script expects `--local-rank` argument to be set, please change it to read from `os.environ['LOCAL_RANK']` instead. See https://pytorch.org/docs/stable/distributed.html#launch-utility for further instructions main() W0621 21:17:32.031000 3434539 site-packages/torch/distributed/run.py:766] W0621 21:17:32.031000 3434539 site-packages/torch/distributed/run.py:766] ***************************************** W0621 21:17:32.031000 3434539 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. W0621 21:17:32.031000 3434539 site-packages/torch/distributed/run.py:766] ***************************************** /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated and will be removed in future. Use torchrun. Note that --use-env is set by default in torchrun. If your script expects `--local-rank` argument to be set, please change it to read from `os.environ['LOCAL_RANK']` instead. See https://pytorch.org/docs/stable/distributed.html#launch-utility for further instructions main() W0621 21:17:32.059000 3365780 site-packages/torch/distributed/run.py:766] W0621 21:17:32.059000 3365780 site-packages/torch/distributed/run.py:766] ***************************************** W0621 21:17:32.059000 3365780 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. W0621 21:17:32.059000 3365780 site-packages/torch/distributed/run.py:766] ***************************************** [rank0]:[W621 21:17:54.613794938 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 0] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank24]:[W621 21:17:54.177146529 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 24] using GPU 0 as device used by this process is currently unknown. 
This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank16]:[W621 21:17:54.161074134 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 16] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank8]:[W621 21:17:54.719589083 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 8] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank6]:[W621 21:17:54.843656378 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 6] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank14]:[W621 21:17:54.730688088 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 14] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank30]:[W621 21:17:54.252403974 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 30] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank22]:[W621 21:17:54.184645852 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 22] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank7]:[W621 21:17:54.850852963 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 7] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank31]:[W621 21:17:54.259406218 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 31] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank25]:[W621 21:17:54.260664348 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 25] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank29]:[W621 21:17:54.261332729 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 29] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank1]:[W621 21:17:54.853269842 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 1] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. 
You can pecify device_id in init_process_group() to force use of a particular device. [rank15]:[W621 21:17:54.740678181 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 15] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank17]:[W621 21:17:54.194215282 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 17] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank5]:[W621 21:17:54.855507473 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 5] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank2]:[W621 21:17:54.857084267 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 2] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank26]:[W621 21:17:54.265107198 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 26] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank13]:[W621 21:17:54.743265496 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 13] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank18]:[W621 21:17:54.196627202 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 18] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank21]:[W621 21:17:54.196954853 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 21] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank3]:[W621 21:17:54.858784679 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 3] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank28]:[W621 21:17:54.267042263 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 28] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank9]:[W621 21:17:54.745802403 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 9] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. 
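The repeated "using GPU N as device used by this process is currently unknown" warning means the NCCL process group is guessing the device because no `device_id` was passed. As the message itself suggests, passing the local rank's device to `init_process_group()` removes the ambiguity; a sketch, assuming the standard `LOCAL_RANK` variable exported by the launcher:

```python
import os
import torch
import torch.distributed as dist

local_rank = int(os.environ.get("LOCAL_RANK", 0))
device = torch.device(f"cuda:{local_rank}")
torch.cuda.set_device(device)

# device_id pins this rank to an explicit GPU, silencing the
# "using GPU N ... currently unknown" ProcessGroupNCCL warning above.
dist.init_process_group(backend="nccl", device_id=device)
```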
[rank27]:[W621 21:17:54.267265509 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 27] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank12]:[W621 21:17:54.745956386 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 12] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank19]:[W621 21:17:54.199397984 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 19] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank11]:[W621 21:17:54.746645650 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 11] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank4]:[W621 21:17:54.860325812 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 4] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank20]:[W621 21:17:54.200533101 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 20] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank10]:[W621 21:17:54.747848397 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 10] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. [rank23]:[W621 21:17:54.200919268 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 23] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. warnings.warn( /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. 
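The warning's own suggestion is easy to apply at startup. A minimal sketch, assuming a torchrun-style launch that exports LOCAL_RANK (the variable name and launcher are assumptions, not something this log states): pin each process to its GPU and pass device_id so the rank-to-GPU mapping is explicit rather than inferred.

import os
import torch
import torch.distributed as dist

# Pin this process to its local GPU before any collective is created.
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)

# Passing device_id tells ProcessGroupNCCL which device this rank owns,
# which is exactly what the warning above asks for.
dist.init_process_group(
    backend="nccl",
    device_id=torch.device(f"cuda:{local_rank}"),
)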
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
  warnings.warn(
(The same UserWarning was repeated verbatim by the other worker processes.)
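A hedged sketch of what the deprecation asks for, assuming current Megatron-Core conventions (routing FP8 through the TransformerConfig is an assumption based on the library's config object, not something stated in this log): call the layer-spec helper without the fp8 argument and let the model config carry the FP8 setting.

from megatron.core.models.gpt.gpt_layer_specs import (
    get_gpt_layer_with_transformer_engine_spec,
)

# No fp8=... here; arguments are left at their defaults.
layer_spec = get_gpt_layer_with_transformer_engine_spec()

# FP8 would instead be configured on the TransformerConfig passed to the
# model (e.g. config.fp8 = "hybrid"), per the assumption noted above.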
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
  warnings.warn(
(The same DeprecationWarning was repeated verbatim by the other worker processes.)
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py:915: FutureWarning: `load_state_dict` is deprecated and will be removed in future versions. Please use `load` instead.
  checkpoint.load_state_dict(
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/planner_helpers.py:406: FutureWarning: Please use DTensor instead and we are deprecating ShardedTensor.
  device = getattr(value, "device", None)
(Both FutureWarnings were repeated verbatim by the other worker processes.)
device = getattr(value, "device", None) [rank6]: Traceback (most recent call last): [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank6]: pretrain( [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain [rank6]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer [rank6]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( [rank6]: ^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint [rank6]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( [rank20]: Traceback (most recent call last): [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank20]: pretrain( [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain [rank20]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( [rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer [rank20]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( [rank20]: ^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint [rank20]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( [rank6]: ^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint [rank6]: return _load_global_dist_base_checkpoint( [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint [rank6]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load [rank6]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) [rank20]: ^^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint [rank20]: return _load_global_dist_base_checkpoint( [rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint [rank20]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) [rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load [rank20]: loaded_state_dict = 
sharded_strategy.load(sharded_state_dict, checkpoint_dir) [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load [rank6]: checkpoint.load_state_dict( [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper [rank6]: return arg(*args, **kwargs) [rank6]: ^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict [rank6]: return _load_state_dict( [rank6]: ^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict [rank27]: Traceback (most recent call last): [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank27]: pretrain( [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain [rank27]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( [rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer [rank27]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( [rank27]: ^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint [rank27]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( [rank14]: Traceback (most recent call last): [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank14]: pretrain( [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain [rank14]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( [rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer [rank14]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( [rank14]: ^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint [rank14]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( [rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load [rank20]: checkpoint.load_state_dict( [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper [rank20]: return arg(*args, **kwargs) [rank20]: ^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict [rank20]: return _load_state_dict( [rank20]: ^^^^^^^^^^^^^^^^^ [rank20]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict [rank6]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter [rank6]: raise result [rank6]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]) [rank6]: Traceback (most recent call last): (RANK 0) [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank6]: local_data = map_fun() [rank6]: ^^^^^^^^^ [rank27]: ^^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint [rank27]: return _load_global_dist_base_checkpoint( [rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint [rank27]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) [rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load [rank27]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) [rank14]: ^^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint [rank14]: return _load_global_dist_base_checkpoint( [rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint [rank14]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) [rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load [rank14]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) [rank20]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) [rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter [rank20]: raise result [rank20]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]) [rank20]: Traceback (most recent call last): (RANK 0) [rank20]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank20]: local_data = map_fun() [rank20]: ^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank6]: result = func(*args, **kwargs) [rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load [rank27]: checkpoint.load_state_dict( [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper [rank27]: return arg(*args, **kwargs) [rank27]: ^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict [rank27]: return _load_state_dict( [rank27]: ^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict [rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load [rank14]: checkpoint.load_state_dict( [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper [rank14]: return arg(*args, **kwargs) [rank14]: ^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict [rank14]: return _load_state_dict( [rank14]: ^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank6]: ^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank6]: local_plan = planner.create_local_plan() [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank6]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank6]: raise CheckpointingException(_msg) [rank6]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank27]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) [rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank27]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter [rank27]: raise result [rank27]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]) [rank27]: Traceback (most recent call last): (RANK 0) [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank27]: local_data = map_fun() [rank27]: ^^^^^^^^^ [rank14]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) [rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter [rank14]: raise result [rank14]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]) [rank14]: Traceback (most recent call last): (RANK 0) [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank14]: local_data = map_fun() [rank14]: ^^^^^^^^^ [rank20]: result = func(*args, **kwargs) [rank20]: ^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank20]: local_plan = planner.create_local_plan() [rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank20]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank20]: raise CheckpointingException(_msg) [rank6]: Traceback (most recent call last): (RANK 1) [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank6]: local_data = map_fun() [rank6]: ^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank6]: result = func(*args, **kwargs) [rank6]: ^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank6]: local_plan = planner.create_local_plan() [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank14]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank20]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank20]: Traceback (most recent call last): (RANK 1) [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank20]: local_data = map_fun() [rank20]: ^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank20]: result = func(*args, **kwargs) [rank20]: ^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank20]: local_plan = planner.create_local_plan() [rank6]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank6]: raise CheckpointingException(_msg) [rank6]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank6]: Traceback (most recent call last): (RANK 2) [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank6]: local_data = map_fun() [rank6]: ^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank27]: result = func(*args, **kwargs) [rank27]: ^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank27]: local_plan = planner.create_local_plan() [rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank27]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank27]: raise CheckpointingException(_msg) [rank14]: result = func(*args, **kwargs) [rank14]: ^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank14]: local_plan = planner.create_local_plan() [rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank14]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank14]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank14]: raise CheckpointingException(_msg) [rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank20]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank20]: raise CheckpointingException(_msg) [rank20]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank20]: Traceback (most recent call last): (RANK 2) [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank20]: local_data = map_fun() [rank6]: result = func(*args, **kwargs) [rank6]: ^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank6]: local_plan = planner.create_local_plan() [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank6]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank6]: raise CheckpointingException(_msg) [rank27]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank27]: Traceback (most recent call last): (RANK 1) [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank27]: local_data = map_fun() [rank27]: ^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank27]: result = func(*args, **kwargs) [rank27]: ^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank27]: local_plan = planner.create_local_plan() [rank14]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank14]: Traceback (most recent call last): (RANK 1) [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank14]: local_data = map_fun() [rank14]: ^^^^^^^^^ [rank14]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank14]: result = func(*args, **kwargs) [rank14]: ^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank14]: local_plan = planner.create_local_plan() [rank20]: ^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank20]: result = func(*args, **kwargs) [rank20]: ^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank20]: local_plan = planner.create_local_plan() [rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank20]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank6]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank27]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank27]: raise CheckpointingException(_msg) [rank27]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank27]: Traceback (most recent call last): (RANK 2) [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank27]: local_data = map_fun() [rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank14]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank14]: raise CheckpointingException(_msg) [rank14]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank14]: Traceback (most recent call last): (RANK 2) [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank14]: local_data = map_fun() [rank20]: 
raise CheckpointingException(_msg) [rank6]: Traceback (most recent call last): (RANK 3) [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank6]: local_data = map_fun() [rank6]: ^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank6]: result = func(*args, **kwargs) [rank6]: ^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank6]: local_plan = planner.create_local_plan() [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank27]: ^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank27]: result = func(*args, **kwargs) [rank27]: ^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank27]: local_plan = planner.create_local_plan() [rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank27]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank14]: ^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank14]: result = func(*args, **kwargs) [rank14]: ^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank14]: local_plan = planner.create_local_plan() [rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank14]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank20]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank20]: Traceback (most recent call last): (RANK 3) [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank20]: local_data = map_fun() [rank20]: ^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank20]: result = func(*args, 
**kwargs) [rank20]: ^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank20]: local_plan = planner.create_local_plan() [rank6]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank6]: raise CheckpointingException(_msg) [rank6]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank6]: Traceback (most recent call last): (RANK 4) [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank6]: local_data = map_fun() [rank6]: ^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank27]: raise CheckpointingException(_msg) [rank14]: raise CheckpointingException(_msg) [rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank20]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank20]: raise CheckpointingException(_msg) [rank20]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank20]: Traceback (most recent call last): (RANK 4) [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank20]: local_data = map_fun() [rank6]: result = func(*args, **kwargs) [rank6]: ^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank6]: local_plan = planner.create_local_plan() [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank6]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank6]: raise CheckpointingException(_msg) [rank27]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank27]: Traceback (most recent call last): (RANK 3) [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank27]: local_data = map_fun() 
[rank27]: ^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank27]: result = func(*args, **kwargs) [rank27]: ^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank27]: local_plan = planner.create_local_plan() [rank14]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank14]: Traceback (most recent call last): (RANK 3) [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank14]: local_data = map_fun() [rank14]: ^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank14]: result = func(*args, **kwargs) [rank14]: ^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank14]: local_plan = planner.create_local_plan() [rank20]: ^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank20]: result = func(*args, **kwargs) [rank20]: ^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank20]: local_plan = planner.create_local_plan() [rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank20]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank6]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank6]: Traceback (most recent call last): (RANK 5) [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank6]: local_data = map_fun() [rank6]: ^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank6]: result = func(*args, **kwargs) [rank6]: ^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank6]: local_plan = planner.create_local_plan() [rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in 
create_local_plan [rank27]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank27]: raise CheckpointingException(_msg) [rank27]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank27]: Traceback (most recent call last): (RANK 4) [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank27]: local_data = map_fun() [rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank14]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank14]: raise CheckpointingException(_msg) [rank14]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank14]: Traceback (most recent call last): (RANK 4) [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank14]: local_data = map_fun() [rank20]: raise CheckpointingException(_msg) [rank20]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank20]: Traceback (most recent call last): (RANK 5) [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank20]: local_data = map_fun() [rank20]: ^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank20]: result = func(*args, **kwargs) [rank20]: ^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank27]: ^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank27]: result = func(*args, **kwargs) [rank27]: ^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank27]: local_plan = planner.create_local_plan() [rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank27]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank27]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank14]: ^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank14]: result = func(*args, **kwargs) [rank14]: ^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank14]: local_plan = planner.create_local_plan() [rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank14]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank20]: local_plan = planner.create_local_plan() [rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank20]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank20]: raise CheckpointingException(_msg) [rank20]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank20]: Traceback (most recent call last): (RANK 6) [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank6]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank6]: raise CheckpointingException(_msg) [rank6]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank6]: Traceback (most recent call last): (RANK 6) [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank6]: local_data = map_fun() [rank6]: ^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank27]: raise CheckpointingException(_msg) [rank27]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank27]: Traceback (most 
recent call last): (RANK 5) [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank27]: local_data = map_fun() [rank27]: ^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank27]: result = func(*args, **kwargs) [rank27]: ^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank14]: raise CheckpointingException(_msg) [rank14]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank14]: Traceback (most recent call last): (RANK 5) [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank14]: local_data = map_fun() [rank14]: ^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank14]: result = func(*args, **kwargs) [rank14]: ^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank20]: local_data = map_fun() [rank20]: ^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank20]: result = func(*args, **kwargs) [rank20]: ^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank20]: local_plan = planner.create_local_plan() [rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank20]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank6]: result = func(*args, **kwargs) [rank6]: ^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank6]: local_plan = planner.create_local_plan() [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank6]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank6]: raise CheckpointingException(_msg) [rank27]: local_plan = planner.create_local_plan() [rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank27]: self._validate_global_shapes(self.metadata, 
self.shapes_validation_sharded_tensors) [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank27]: raise CheckpointingException(_msg) [rank27]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank27]: Traceback (most recent call last): (RANK 6) [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank14]: local_plan = planner.create_local_plan() [rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank14]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank14]: raise CheckpointingException(_msg) [rank14]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank14]: Traceback (most recent call last): (RANK 6) [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank20]: raise CheckpointingException(_msg) [rank20]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank20]: Traceback (most recent call last): (RANK 7) [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank20]: local_data = map_fun() [rank20]: ^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank20]: result = func(*args, **kwargs) [rank20]: ^^^^^^^^^^^^^^^^^^^^^ [rank6]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank6]: Traceback (most recent call last): (RANK 7) [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank6]: local_data = map_fun() [rank6]: ^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank6]: result = func(*args, **kwargs) [rank6]: ^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank6]: local_plan = 
planner.create_local_plan() [rank27]: local_data = map_fun() [rank27]: ^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank27]: result = func(*args, **kwargs) [rank27]: ^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank27]: local_plan = planner.create_local_plan() [rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank27]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank14]: local_data = map_fun() [rank14]: ^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank14]: result = func(*args, **kwargs) [rank14]: ^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank14]: local_plan = planner.create_local_plan() [rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank14]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank20]: local_plan = planner.create_local_plan() [rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank20]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank20]: raise CheckpointingException(_msg) [rank20]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank6]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank6]: raise CheckpointingException(_msg) [rank6]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank6]: Traceback (most recent call last): (RANK 8) [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank6]: local_data = map_fun() 
[rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank27]: raise CheckpointingException(_msg) [rank27]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank27]: Traceback (most recent call last): (RANK 7) [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank27]: local_data = map_fun() [rank27]: ^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank27]: result = func(*args, **kwargs) [rank27]: ^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank14]: raise CheckpointingException(_msg) [rank14]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank14]: Traceback (most recent call last): (RANK 7) [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank14]: local_data = map_fun() [rank14]: ^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank14]: result = func(*args, **kwargs) [rank14]: ^^^^^^^^^^^^^^^^^^^^^ [rank20]: Traceback (most recent call last): (RANK 8) [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank6]: ^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank6]: result = func(*args, **kwargs) [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank27]: local_plan = planner.create_local_plan() [rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank27]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank27]: raise CheckpointingException(_msg) [rank27]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank14]: local_plan = planner.create_local_plan() [rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ 
[rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank14]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank14]: raise CheckpointingException(_msg) [rank14]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank20]: local_data = map_fun() [rank20]: ^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank20]: result = func(*args, **kwargs) [rank20]: ^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank20]: local_plan = planner.create_local_plan() [rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank20]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank6]: ^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank6]: local_plan = planner.create_local_plan() [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank6]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank6]: raise CheckpointingException(_msg) [rank6]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank27]: Traceback (most recent call last): (RANK 8) [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank14]: Traceback (most recent call last): (RANK 8) [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank20]: raise CheckpointingException(_msg) [rank20]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank20]: Traceback (most recent call last): (RANK 9) [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter 
[rank20]: local_data = map_fun() [rank20]: ^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank20]: result = func(*args, **kwargs) [rank20]: ^^^^^^^^^^^^^^^^^^^^^ [rank6]: Traceback (most recent call last): (RANK 9) [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank6]: local_data = map_fun() [rank6]: ^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank6]: result = func(*args, **kwargs) [rank6]: ^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank6]: local_plan = planner.create_local_plan() [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank27]: local_data = map_fun() [rank27]: ^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank27]: result = func(*args, **kwargs) [rank27]: ^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank27]: local_plan = planner.create_local_plan() [rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank27]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank14]: local_data = map_fun() [rank14]: ^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank14]: result = func(*args, **kwargs) [rank14]: ^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank14]: local_plan = planner.create_local_plan() [rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank14]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank20]: local_plan = planner.create_local_plan() [rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank20]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank20]: 
raise CheckpointingException(_msg) [rank20]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank6]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank6]: raise CheckpointingException(_msg) [rank6]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank6]: Traceback (most recent call last): (RANK 10) [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank6]: local_data = map_fun() [rank6]: ^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank27]: raise CheckpointingException(_msg) [rank27]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank27]: Traceback (most recent call last): (RANK 9) [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank27]: local_data = map_fun() [rank27]: ^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank27]: result = func(*args, **kwargs) [rank27]: ^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank14]: raise CheckpointingException(_msg) [rank14]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank14]: Traceback (most recent call last): (RANK 9) [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank14]: local_data = map_fun() [rank14]: ^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank14]: result = func(*args, **kwargs) [rank14]: ^^^^^^^^^^^^^^^^^^^^^ [rank20]: Traceback (most recent call last): (RANK 10) [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank20]: local_data = map_fun() [rank20]: ^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank20]: 
result = func(*args, **kwargs) [rank20]: ^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank20]: local_plan = planner.create_local_plan() [rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank6]: result = func(*args, **kwargs) [rank6]: ^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank6]: local_plan = planner.create_local_plan() [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank6]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank6]: raise CheckpointingException(_msg) [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank27]: local_plan = planner.create_local_plan() [rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank27]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank27]: raise CheckpointingException(_msg) [rank27]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank14]: local_plan = planner.create_local_plan() [rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank14]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank14]: raise CheckpointingException(_msg) [rank14]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank20]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank6]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank27]: Traceback (most recent call last): (RANK 10) [rank27]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank27]: local_data = map_fun() [rank27]: ^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank27]: result = func(*args, **kwargs) [rank27]: ^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank27]: local_plan = planner.create_local_plan() [rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank14]: Traceback (most recent call last): (RANK 10) [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank14]: local_data = map_fun() [rank14]: ^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank14]: result = func(*args, **kwargs) [rank14]: ^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank14]: local_plan = planner.create_local_plan() [rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank20]: raise CheckpointingException(_msg) [rank20]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank20]: Traceback (most recent call last): (RANK 11) [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank20]: local_data = map_fun() [rank20]: ^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank20]: result = func(*args, **kwargs) [rank20]: ^^^^^^^^^^^^^^^^^^^^^ [rank6]: Traceback (most recent call last): (RANK 11) [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank6]: local_data = map_fun() [rank6]: ^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank6]: result = func(*args, **kwargs) [rank6]: ^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank6]: local_plan = planner.create_local_plan() [rank6]: 
^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank27]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank14]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank20]: local_plan = planner.create_local_plan() [rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank20]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank20]: raise CheckpointingException(_msg) [rank20]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank6]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank6]: raise CheckpointingException(_msg) [rank6]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank6]: Traceback (most recent call last): (RANK 12) [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank6]: local_data = map_fun() [rank6]: ^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank27]: raise CheckpointingException(_msg) [rank27]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank27]: Traceback (most recent call last): (RANK 11) [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank27]: local_data = map_fun() [rank27]: ^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank27]: result = func(*args, **kwargs) [rank27]: ^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank14]: raise CheckpointingException(_msg) [rank14]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected 
((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank14]: Traceback (most recent call last): (RANK 11) [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank14]: local_data = map_fun() [rank14]: ^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank14]: result = func(*args, **kwargs) [rank14]: ^^^^^^^^^^^^^^^^^^^^^ [rank20]: Traceback (most recent call last): (RANK 12) [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank20]: local_data = map_fun() [rank20]: ^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank20]: result = func(*args, **kwargs) [rank20]: ^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank20]: local_plan = planner.create_local_plan() [rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank6]: result = func(*args, **kwargs) [rank6]: ^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank6]: local_plan = planner.create_local_plan() [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank6]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank6]: raise CheckpointingException(_msg) [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank27]: local_plan = planner.create_local_plan() [rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank27]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank27]: raise CheckpointingException(_msg) [rank27]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank14]: local_plan = planner.create_local_plan() [rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank14]: 
File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank14]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank14]: raise CheckpointingException(_msg) [rank14]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank20]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank20]: raise CheckpointingException(_msg) [rank20]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank20]: Traceback (most recent call last): (RANK 13) [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank20]: local_data = map_fun() [rank20]: ^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank6]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank6]: Traceback (most recent call last): (RANK 13) [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank6]: local_data = map_fun() [rank6]: ^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank6]: result = func(*args, **kwargs) [rank6]: ^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank6]: local_plan = planner.create_local_plan() [rank27]: Traceback (most recent call last): (RANK 12) [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank27]: local_data = map_fun() [rank27]: ^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank27]: result = func(*args, **kwargs) [rank27]: ^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank27]: local_plan = planner.create_local_plan() [rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank14]: Traceback 
(most recent call last): (RANK 12) [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank14]: local_data = map_fun() [rank14]: ^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank14]: result = func(*args, **kwargs) [rank14]: ^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank14]: local_plan = planner.create_local_plan() [rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank20]: result = func(*args, **kwargs) [rank20]: ^^^^^^^^^^^^^^^^^^^^^ [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank27]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank27]: raise CheckpointingException(_msg) [rank27]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank27]: Traceback (most recent call last): (RANK 13) [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank27]: local_data = map_fun() [rank27]: ^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank14]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank14]: raise CheckpointingException(_msg) [rank14]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank14]: Traceback (most recent call last): (RANK 13) [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank14]: local_data = map_fun() [rank14]: ^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank20]: local_plan = planner.create_local_plan() [rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan 
[rank20]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank20]: raise CheckpointingException(_msg) [rank20]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank6]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank6]: raise CheckpointingException(_msg) [rank6]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank6]: Traceback (most recent call last): (RANK 14) [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank6]: local_data = map_fun() [rank6]: ^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank27]: result = func(*args, **kwargs) [rank27]: ^^^^^^^^^^^^^^^^^^^^^ [rank14]: result = func(*args, **kwargs) [rank14]: ^^^^^^^^^^^^^^^^^^^^^ [rank20]: Traceback (most recent call last): (RANK 14) [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank20]: local_data = map_fun() [rank20]: ^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank20]: result = func(*args, **kwargs) [rank20]: ^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank20]: local_plan = planner.create_local_plan() [rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank6]: result = func(*args, **kwargs) [rank6]: ^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank6]: local_plan = planner.create_local_plan() [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank6]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank6]: raise CheckpointingException(_msg) [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank27]: 
local_plan = planner.create_local_plan() [rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank27]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank27]: raise CheckpointingException(_msg) [rank27]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank14]: local_plan = planner.create_local_plan() [rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank14]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank14]: raise CheckpointingException(_msg) [rank14]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank20]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank20]: raise CheckpointingException(_msg) [rank20]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank20]: Traceback (most recent call last): (RANK 15) [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank20]: local_data = map_fun() [rank20]: ^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank6]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank6]: Traceback (most recent call last): (RANK 15) [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank6]: local_data = map_fun() [rank6]: ^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank6]: result = func(*args, **kwargs) [rank6]: ^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in 
local_step [rank6]: local_plan = planner.create_local_plan() [rank27]: Traceback (most recent call last): (RANK 14) [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank27]: local_data = map_fun() [rank27]: ^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank27]: result = func(*args, **kwargs) [rank27]: ^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank27]: local_plan = planner.create_local_plan() [rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank14]: Traceback (most recent call last): (RANK 14) [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank14]: local_data = map_fun() [rank14]: ^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank14]: result = func(*args, **kwargs) [rank14]: ^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank14]: local_plan = planner.create_local_plan() [rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank20]: result = func(*args, **kwargs) [rank20]: ^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank20]: local_plan = planner.create_local_plan() [rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank20]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank20]: raise CheckpointingException(_msg) [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank6]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank6]: raise CheckpointingException(_msg) [rank6]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank6]: Traceback (most recent call last): (RANK 16) [rank6]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank6]: local_data = map_fun() [rank27]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank27]: raise CheckpointingException(_msg) [rank27]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank27]: Traceback (most recent call last): (RANK 15) [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank27]: local_data = map_fun() [rank27]: ^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank14]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank14]: raise CheckpointingException(_msg) [rank14]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank14]: Traceback (most recent call last): (RANK 15) [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank14]: local_data = map_fun() [rank14]: ^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank20]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank6]: ^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank6]: result = func(*args, **kwargs) [rank27]: result = func(*args, **kwargs) [rank27]: ^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank27]: local_plan = planner.create_local_plan() [rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank27]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank27]: raise CheckpointingException(_msg) [rank14]: result = func(*args, **kwargs) [rank14]: ^^^^^^^^^^^^^^^^^^^^^ [rank14]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank14]: local_plan = planner.create_local_plan() [rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank14]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank14]: raise CheckpointingException(_msg) [rank20]: Traceback (most recent call last): (RANK 16) [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank20]: local_data = map_fun() [rank20]: ^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank20]: result = func(*args, **kwargs) [rank20]: ^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank20]: local_plan = planner.create_local_plan() [rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank6]: ^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank6]: local_plan = planner.create_local_plan() [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank6]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank6]: raise CheckpointingException(_msg) [rank6]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank27]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank14]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank20]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank20]: raise CheckpointingException(_msg) [rank20]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key 
embedding.position_embeddings.weight [rank20]: Traceback (most recent call last): (RANK 17) [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank20]: local_data = map_fun() [rank20]: ^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank6]: Traceback (most recent call last): (RANK 17) [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank6]: local_data = map_fun() [rank6]: ^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank6]: result = func(*args, **kwargs) [rank6]: ^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank6]: local_plan = planner.create_local_plan() [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank27]: Traceback (most recent call last): (RANK 16) [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank27]: local_data = map_fun() [rank27]: ^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank27]: result = func(*args, **kwargs) [rank27]: ^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank27]: local_plan = planner.create_local_plan() [rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank14]: Traceback (most recent call last): (RANK 16) [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank14]: local_data = map_fun() [rank14]: ^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank14]: result = func(*args, **kwargs) [rank14]: ^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank14]: local_plan = planner.create_local_plan() [rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank20]: result = func(*args, **kwargs) [rank20]: ^^^^^^^^^^^^^^^^^^^^^ [rank20]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank20]: local_plan = planner.create_local_plan() [rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank20]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank20]: raise CheckpointingException(_msg) [rank6]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank6]: raise CheckpointingException(_msg) [rank6]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank6]: Traceback (most recent call last): (RANK 18) [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank6]: local_data = map_fun() [rank6]: ^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank27]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank27]: raise CheckpointingException(_msg) [rank27]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank27]: Traceback (most recent call last): (RANK 17) [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank27]: local_data = map_fun() [rank27]: ^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank14]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank14]: raise CheckpointingException(_msg) [rank14]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank14]: Traceback (most recent call last): (RANK 17) [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank14]: local_data = map_fun() [rank14]: ^^^^^^^^^ [rank14]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank20]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank20]: Traceback (most recent call last): (RANK 18) [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank20]: local_data = map_fun() [rank20]: ^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank20]: result = func(*args, **kwargs) [rank20]: ^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank20]: local_plan = planner.create_local_plan() [rank6]: result = func(*args, **kwargs) [rank6]: ^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank6]: local_plan = planner.create_local_plan() [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank6]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank6]: raise CheckpointingException(_msg) [rank27]: result = func(*args, **kwargs) [rank27]: ^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank27]: local_plan = planner.create_local_plan() [rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank27]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank27]: raise CheckpointingException(_msg) [rank14]: result = func(*args, **kwargs) [rank14]: ^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank14]: local_plan = planner.create_local_plan() [rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank14]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank14]: raise CheckpointingException(_msg) [rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ 
[rank6]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank27]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank27]: Traceback (most recent call last): (RANK 18) [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank27]: local_data = map_fun() [rank27]: ^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank27]: result = func(*args, **kwargs) [rank27]: ^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank27]: local_plan = planner.create_local_plan() [rank14]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank14]: Traceback (most recent call last): (RANK 18) [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank14]: local_data = map_fun() [rank14]: ^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank14]: result = func(*args, **kwargs) [rank14]: ^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank14]: local_plan = planner.create_local_plan() [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank20]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank20]: raise CheckpointingException(_msg) [rank20]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank20]: Traceback (most recent call last): (RANK 19) [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank20]: local_data = map_fun() [rank20]: ^^^^^^^^^ [rank6]: Traceback (most recent call last): (RANK 19) [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank6]: local_data = map_fun() [rank6]: ^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in 
wrapper [rank6]: result = func(*args, **kwargs) [rank6]: ^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank6]: local_plan = planner.create_local_plan() [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank20]: result = func(*args, **kwargs) [rank20]: ^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank20]: local_plan = planner.create_local_plan() [rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank20]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank20]: raise CheckpointingException(_msg) [rank6]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank6]: raise CheckpointingException(_msg) [rank6]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank6]: Traceback (most recent call last): (RANK 20) [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank6]: local_data = map_fun() [rank6]: ^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank27]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank27]: raise CheckpointingException(_msg) [rank27]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank27]: Traceback (most recent call last): (RANK 19) [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank27]: local_data = map_fun() [rank27]: ^^^^^^^^^ [rank14]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank14]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank14]: raise CheckpointingException(_msg) [rank14]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank14]: Traceback (most recent call last): (RANK 19) [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank14]: local_data = map_fun() [rank14]: ^^^^^^^^^ [rank20]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank20]: Traceback (most recent call last): (RANK 20) [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank20]: local_data = map_fun() [rank20]: ^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank20]: result = func(*args, **kwargs) [rank20]: ^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank20]: local_plan = planner.create_local_plan() [rank6]: result = func(*args, **kwargs) [rank6]: ^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank6]: local_plan = planner.create_local_plan() [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank6]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank6]: raise CheckpointingException(_msg) [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank27]: result = func(*args, **kwargs) [rank27]: ^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank27]: local_plan = planner.create_local_plan() [rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank27]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank27]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank27]: raise CheckpointingException(_msg) [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank14]: result = func(*args, **kwargs) [rank14]: ^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank14]: local_plan = planner.create_local_plan() [rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank14]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank14]: raise CheckpointingException(_msg) [rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank20]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank20]: raise CheckpointingException(_msg) [rank20]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank20]: Traceback (most recent call last): (RANK 21) [rank20]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank20]: local_data = map_fun() [rank6]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank6]: Traceback (most recent call last): (RANK 21) [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank6]: local_data = map_fun() [rank6]: ^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank6]: result = func(*args, **kwargs) [rank6]: ^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank6]: local_plan = planner.create_local_plan() [rank27]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank27]: Traceback (most recent call last): (RANK 20) [rank27]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank27]: local_data = 
Ranks 6, 14, 20, and 27 all fail while building the local load plan and raise the same CheckpointingException; in this stretch of the log the identical traceback is printed repeatedly, tagged (RANK 20) through (RANK 31) in each rank's aggregated error output. One representative copy:

Traceback (most recent call last):
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter
    local_data = map_fun()
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper
    result = func(*args, **kwargs)
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step
    local_plan = planner.create_local_plan()
  File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan
    self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors)
  File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes
    raise CheckpointingException(_msg)
megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight
The failure then propagates to the top of the training entry point. Ranks 4, 8, 19, and 29 begin the same top-level traceback into the checkpoint loader; the representative copy below is rank 19's, which is the most complete in this stretch of the log (ranks 8 and 29 are shown only partway here):

Traceback (most recent call last):
  File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
    pretrain(
  File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain
    model, optimizer, opt_param_scheduler = setup_model_and_optimizer(
  File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer
    args.iteration, args.num_floating_point_operations_so_far = load_checkpoint(
  File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint
    state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint(
  File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint
    return _load_global_dist_base_checkpoint(
  File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint
    state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness)
  File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load
    loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir)
  File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load
    checkpoint.load_state_dict(
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
    return arg(*args, **kwargs)
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict
    return _load_state_dict(
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict
    central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step)
distW.reduce_scatter("plan", local_step, global_step) [rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint [rank29]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load [rank29]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load [rank29]: checkpoint.load_state_dict( [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint [rank8]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load [rank8]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load [rank8]: checkpoint.load_state_dict( [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter [rank19]: raise result [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter [rank4]: raise result [rank4]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]) [rank4]: Traceback (most recent call last): (RANK 0) [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank4]: local_data = map_fun() [rank4]: ^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank4]: result = func(*args, **kwargs) [rank4]: ^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper [rank29]: return arg(*args, **kwargs) [rank29]: ^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict [rank29]: return _load_state_dict( [rank29]: ^^^^^^^^^^^^^^^^^ [rank29]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict [rank29]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper [rank8]: return arg(*args, **kwargs) [rank8]: ^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict [rank8]: return _load_state_dict( [rank8]: ^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict [rank8]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]) [rank19]: Traceback (most recent call last): (RANK 0) [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank19]: local_data = map_fun() [rank19]: ^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank19]: result = func(*args, **kwargs) [rank19]: ^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank4]: local_plan = planner.create_local_plan() [rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter [rank29]: raise result [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter [rank8]: raise result [rank8]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]) [rank19]: local_plan = planner.create_local_plan() [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank19]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank19]: raise CheckpointingException(_msg) [rank19]: 
megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank19]: Traceback (most recent call last): (RANK 1) [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank29]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]) [rank29]: Traceback (most recent call last): (RANK 0) [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank29]: local_data = map_fun() [rank29]: ^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank29]: result = func(*args, **kwargs) [rank29]: ^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank8]: Traceback (most recent call last): (RANK 0) [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank8]: local_data = map_fun() [rank8]: ^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank8]: result = func(*args, **kwargs) [rank8]: ^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank8]: local_plan = planner.create_local_plan() [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank19]: local_data = map_fun() [rank19]: ^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank19]: result = func(*args, **kwargs) [rank19]: ^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank19]: local_plan = planner.create_local_plan() [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank19]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank4]: raise 
CheckpointingException(_msg) [rank4]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank4]: Traceback (most recent call last): (RANK 1) [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank4]: local_data = map_fun() [rank4]: ^^^^^^^^^ [rank29]: local_plan = planner.create_local_plan() [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank29]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: raise CheckpointingException(_msg) [rank29]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank29]: Traceback (most recent call last): (RANK 1) [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank8]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank8]: raise CheckpointingException(_msg) [rank8]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank8]: Traceback (most recent call last): (RANK 1) [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank8]: local_data = map_fun() [rank8]: ^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank19]: raise CheckpointingException(_msg) [rank19]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank19]: Traceback (most recent call last): (RANK 2) [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank19]: local_data = map_fun() [rank19]: ^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank19]: result = func(*args, **kwargs) [rank19]: ^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 
87, in wrapper [rank4]: result = func(*args, **kwargs) [rank4]: ^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank4]: local_plan = planner.create_local_plan() [rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank4]: raise CheckpointingException(_msg) [rank29]: local_data = map_fun() [rank29]: ^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank29]: result = func(*args, **kwargs) [rank29]: ^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank29]: local_plan = planner.create_local_plan() [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank29]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank8]: result = func(*args, **kwargs) [rank8]: ^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank8]: local_plan = planner.create_local_plan() [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank8]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank8]: raise CheckpointingException(_msg) [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank4]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank4]: Traceback (most recent call last): (RANK 2) [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank4]: local_data = map_fun() [rank4]: ^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank4]: result = func(*args, **kwargs) [rank4]: ^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank4]: local_plan = planner.create_local_plan() 
[rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: raise CheckpointingException(_msg) [rank29]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank29]: Traceback (most recent call last): (RANK 2) [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank29]: local_data = map_fun() [rank29]: ^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank29]: result = func(*args, **kwargs) [rank29]: ^^^^^^^^^^^^^^^^^^^^^ [rank8]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank8]: Traceback (most recent call last): (RANK 2) [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank8]: local_data = map_fun() [rank8]: ^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank8]: result = func(*args, **kwargs) [rank8]: ^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank8]: local_plan = planner.create_local_plan() [rank19]: local_plan = planner.create_local_plan() [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank19]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank19]: raise CheckpointingException(_msg) [rank19]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank19]: Traceback (most recent call last): (RANK 3) [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank4]: raise CheckpointingException(_msg) [rank4]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key 
embedding.position_embeddings.weight [rank4]: Traceback (most recent call last): (RANK 3) [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank4]: local_data = map_fun() [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank19]: local_data = map_fun() [rank19]: ^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank19]: result = func(*args, **kwargs) [rank19]: ^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank19]: local_plan = planner.create_local_plan() [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank19]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank4]: ^^^^^^^^^ [rank29]: local_plan = planner.create_local_plan() [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank29]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: raise CheckpointingException(_msg) [rank29]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank29]: Traceback (most recent call last): (RANK 3) [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank8]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank8]: raise CheckpointingException(_msg) [rank8]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank8]: Traceback (most recent call last): (RANK 3) [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank8]: local_data = map_fun() [rank8]: ^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank19]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank19]: raise CheckpointingException(_msg) [rank19]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank19]: Traceback (most recent call last): (RANK 4) [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank19]: local_data = map_fun() [rank19]: ^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank19]: result = func(*args, **kwargs) [rank19]: ^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank4]: result = func(*args, **kwargs) [rank4]: ^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank4]: local_plan = planner.create_local_plan() [rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank4]: raise CheckpointingException(_msg) [rank29]: local_data = map_fun() [rank29]: ^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank29]: result = func(*args, **kwargs) [rank29]: ^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank29]: local_plan = planner.create_local_plan() [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank29]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank8]: result = func(*args, **kwargs) [rank8]: ^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank8]: local_plan = planner.create_local_plan() [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank8]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank8]: raise CheckpointingException(_msg) [rank19]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank19]: local_plan = planner.create_local_plan() [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank19]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank19]: raise CheckpointingException(_msg) [rank19]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank4]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank4]: Traceback (most recent call last): (RANK 4) [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank4]: local_data = map_fun() [rank4]: ^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank4]: result = func(*args, **kwargs) [rank4]: ^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank4]: local_plan = planner.create_local_plan() [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: raise CheckpointingException(_msg) [rank29]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank29]: Traceback (most recent call last): (RANK 4) [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank29]: local_data = map_fun() [rank29]: ^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank29]: result = func(*args, **kwargs) [rank29]: ^^^^^^^^^^^^^^^^^^^^^ [rank8]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank8]: Traceback (most recent call last): (RANK 4) [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank8]: local_data = map_fun() [rank8]: ^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank8]: result = func(*args, **kwargs) [rank8]: 
^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank8]: local_plan = planner.create_local_plan() [rank19]: Traceback (most recent call last): (RANK 5) [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank4]: raise CheckpointingException(_msg) [rank4]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank4]: Traceback (most recent call last): (RANK 5) [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank4]: local_data = map_fun() [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank29]: local_plan = planner.create_local_plan() [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank29]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: raise CheckpointingException(_msg) [rank29]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank8]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank8]: raise CheckpointingException(_msg) [rank8]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank8]: Traceback (most recent call last): (RANK 5) [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank8]: local_data = map_fun() [rank19]: local_data = map_fun() [rank19]: ^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in 
wrapper [rank19]: result = func(*args, **kwargs) [rank19]: ^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank19]: local_plan = planner.create_local_plan() [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank19]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank4]: ^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank4]: result = func(*args, **kwargs) [rank4]: ^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank4]: local_plan = planner.create_local_plan() [rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: Traceback (most recent call last): (RANK 5) [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank8]: ^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank8]: result = func(*args, **kwargs) [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank19]: raise CheckpointingException(_msg) [rank19]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank19]: Traceback (most recent call last): (RANK 6) [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank19]: local_data = map_fun() [rank19]: ^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank19]: result = func(*args, **kwargs) [rank19]: ^^^^^^^^^^^^^^^^^^^^^ [rank4]: raise CheckpointingException(_msg) [rank29]: local_data = map_fun() [rank29]: ^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank29]: result = func(*args, **kwargs) [rank29]: ^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank29]: local_plan = planner.create_local_plan() [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: 
File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank29]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank8]: ^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank8]: local_plan = planner.create_local_plan() [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank8]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tenso[rank12]: Traceback (most recent call last): [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank12]: pretrain( [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain [rank12]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank19]: local_plan = planner.create_local_plan() [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank19]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank19]: raise CheckpointingException(_msg) [rank19]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank4]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank4]: Traceback (most recent call last): (RANK 6) [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank4]: local_data = map_fun() [rank4]: ^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank4]: result = func(*args, **kwargs) [rank4]: ^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank4]: local_plan = planner.create_local_plan() [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: raise CheckpointingException(_msg) [rank29]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank29]: Traceback (most recent call last): (RANK 6) [rank29]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank29]: local_data = map_fun() [rank29]: ^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank29]: result = func(*args, **kwargs) [rank29]: ^^^^^^^^^^^^^^^^^^^^^ [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer [rank12]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( [rank12]: ^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint [rank12]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( [rank12]: ^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint [rank12]: return _load_global_dist_base_checkpoint( [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: Traceback (most recent call last): (RANK 7) [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank19]: local_data = map_fun() [rank19]: ^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank19]: result = func(*args, **kwargs) [rank19]: ^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank19]: local_plan = planner.create_local_plan() [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank4]: raise CheckpointingException(_msg) [rank4]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank4]: Traceback (most recent call last): (RANK 7) [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank4]: local_data = map_fun() [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank29]: local_plan = planner.create_local_plan() [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in 
create_local_plan [rank29]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: raise CheckpointingException(_msg) [rank29]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint [rank12]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load [rank12]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load [rank12]: checkpoint.load_state_dict( [rank19]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank4]: ^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank4]: result = func(*args, **kwargs) [rank4]: ^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank4]: local_plan = planner.create_local_plan() [rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: Traceback (most recent call last): (RANK 7) [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank29]: local_data = map_fun() [rank29]: ^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank29]: result = func(*args, **kwargs) [rank29]: ^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank29]: local_plan = planner.create_local_plan() [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper [rank12]: return 
arg(*args, **kwargs) [rank12]: ^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict [rank12]: return _load_state_dict( [rank12]: ^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict [rank12]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank19]: raise CheckpointingException(_msg) [rank19]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank19]: Traceback (most recent call last): (RANK 8) [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank19]: local_data = map_fun() [rank19]: ^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank19]: result = func(*args, **kwargs) [rank19]: ^^^^^^^^^^^^^^^^^^^^^ [rank4]: raise CheckpointingException(_msg) [rank4]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank4]: Traceback (most recent call last): (RANK 8) [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank4]: local_data = map_fun() [rank4]: ^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank4]: result = func(*args, **kwargs) [rank4]: ^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/j[rank0]: Traceback (most recent call last): [rank29]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter [rank12]: raise result [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank19]: local_plan = planner.create_local_plan() [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank19]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank19]: raise CheckpointingException(_msg) [rank19]: 
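The failures above share a single root cause: load-plan validation on every rank finds that the checkpoint stores embedding.position_embeddings.weight with global shape [8192, 4096] while the current configuration expects [98304, 4096], which suggests the checkpoint was written with a shorter maximum sequence length than the run now trying to resume from it. As a hedged diagnostic sketch (not part of the job output above), the saved global shape can be confirmed by reading the checkpoint metadata with torch.distributed.checkpoint; the checkpoint directory below is a placeholder, not a path taken from this log.

# Diagnostic sketch: print the global shape recorded in the distributed checkpoint
# for the key named in the exception, so it can be compared with the expected
# (98304, 4096) shape of the current run. CKPT_DIR is a placeholder.
from torch.distributed.checkpoint import FileSystemReader

CKPT_DIR = "/path/to/failing/checkpoint"          # placeholder, not from this log
KEY = "embedding.position_embeddings.weight"      # key named in the exception above

reader = FileSystemReader(CKPT_DIR)
metadata = reader.read_metadata()                 # reads the .metadata file written at save time
entry = metadata.state_dict_metadata.get(KEY)
if entry is None:
    print(f"{KEY} not present in checkpoint metadata")
else:
    # TensorStorageMetadata records the saved global size; fall back to repr otherwise.
    print(KEY, "->", getattr(entry, "size", entry))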
megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank0]: pretrain( [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: raise CheckpointingException(_msg) [rank29]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank29]: Traceback (most recent call last): (RANK 8) [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank29]: local_data = map_fun() [rank29]: ^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in [rank30]: Traceback (most recent call last): [rank12]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]) [rank12]: Traceback (most recent call last): (RANK 0) [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank12]: local_data = map_fun() [rank12]: ^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank12]: result = func(*args, **kwargs) [rank12]: ^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank19]: Traceback (most recent call last): (RANK 9) [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank19]: local_data = map_fun() [rank19]: ^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank19]: result = func(*args, **kwargs) [rank19]: ^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank19]: local_plan = planner.create_local_plan() [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain [rank0]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer [rank0]: args.iteration, args.num_floating_point_operations_so_far = 
load_checkpoint( [rank0]: ^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint [rank0]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( [rank0]: ^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank30]: pretrain( [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain [rank30]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer [rank30]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( [rank30]: ^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint [rank30]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( [rank12]: local_plan = planner.create_local_plan() [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank12]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank12]: raise CheckpointingException(_msg) [rank12]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank12]: Traceback (most recent call last): (RANK 1) [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank19]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank19]: raise CheckpointingException(_msg) [rank19]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank19]: Traceback (most recent call last): (RANK 10) [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank19]: local_data = map_fun() [rank19]: ^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint [rank0]: return _load_global_dist_base_checkpoint( [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint [rank0]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, 
strict=args.dist_ckpt_strictness) [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load [rank0]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: ^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint [rank30]: return _load_global_dist_base_checkpoint( [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint [rank30]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load [rank30]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) [rank12]: local_data = map_fun() [rank12]: ^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank12]: result = func(*args, **kwargs) [rank12]: ^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank12]: local_plan = planner.create_local_plan() [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank12]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank19]: result = func(*args, **kwargs) [rank19]: ^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load [rank0]: checkpoint.load_state_dict( [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper [rank0]: return arg(*args, **kwargs) [rank0]: ^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict [rank0]: return _load_state_dict( [rank0]: ^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict [rank0]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load [rank30]: checkpoint.load_state_dict( [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper 
[rank30]: return arg(*args, **kwargs) [rank30]: ^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict [rank30]: return _load_state_dict( [rank30]: ^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank12]: raise CheckpointingException(_msg) [rank12]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank12]: Traceback (most recent call last): (RANK 2) [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank12]: local_data = map_fun() [rank12]: ^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank12]: result = func(*args, **kwargs) [rank12]: ^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank19]: local_plan = planner.create_local_plan() [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank19]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank19]: raise CheckpointingException(_msg) [rank19]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096))[rank21]: Traceback (most recent call last): [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter [rank0]: raise result [rank0]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]) [rank0]: Traceback (most recent call last): (RANK 0) [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank0]: local_data = map_fun() [rank0]: ^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank30]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank21]: pretrain( [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain [rank21]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer [rank21]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( [rank21]: ^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint [rank21]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( [rank0]: result = func(*args, **kwargs) [rank0]: [rank1]: Traceback (most recent call last): [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank1]: pretrain( [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter [rank30]: raise result [rank30]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]) [rank30]: Traceback (most recent call last): (RANK 0) [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank30]: local_data = map_fun() [rank30]: ^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank30]: rewrapper [rank29]: result = func(*args, **kwargs) [rank29]: ^^^^^^^^^^^^^^^^^^^^^ [rank12]: local_plan = planner.create_local_plan() [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank12]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank12]: raise CheckpointingException(_msg) [rank12]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embeddingrs) [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank8]: raise CheckpointingException(_msg) [rank21]: ^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint [rank21]: return _load_global_dist_base_checkpoint( [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint [rank21]: 
state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load [rank21]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain [rank1]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer [rank1]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( [rank1]: ^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint [rank1]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( [rank1]: ^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank29]: local_plan = planner.create_local_plan() [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank29]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: raise CheckpointingException(_msg) [rank29]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank8]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank8]: Traceback (most recent call last): (RANK 6) [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank8]: local_data = map_fun() [rank8]: ^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank8]: result = func(*args, **kwargs) [rank8]: ^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank8]: local_plan = planner.create_local_plan() [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load [rank21]: checkpoint.load_state_dict( [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper [rank21]: return arg(*args, **kwargs) [rank21]: 
^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict [rank21]: return _load_state_dict( [rank21]: ^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint [rank1]: return _load_global_dist_base_checkpoint( [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint [rank1]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load [rank1]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: Traceback (most recent call last): (RANK 9) [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank29]: local_data = map_fun() [rank29]: ^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank29]: result = func(*args, **kwargs) [rank29]: ^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank29]: local_plan = planner.create_local_plan() [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank8]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank8]: raise CheckpointingException(_msg) [rank8]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank8]: Traceback (most recent call last): (RANK 7) [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank8]: local_data = map_fun() [rank21]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load [rank1]: 
checkpoint.load_state_dict( [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper [rank1]: return arg(*args, **kwargs) [rank1]: ^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict [rank1]: return _load_state_dict( [rank1]: ^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict [rank1]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) [rank29]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: raise CheckpointingException(_msg) [rank29]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank29]: Traceback (most recent call last): (RANK 10) [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank29]: local_data = map_fun() [rank29]: ^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank8]: ^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank8]: result = func(*args, **kwargs) [rank8]: ^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank8]: local_plan = planner.create_local_plan() [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank8]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter [rank21]: raise result [rank21]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]) [rank21]: Traceback (most recent call last): (RANK 0) [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank21]: local_data = map_fun() [rank21]: ^^^^^^^^^ [rank21]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter [rank1]: raise result [rank1]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]) [rank1]: Traceback (most recent call last): (RANK 0) [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank1]: local_data = map_fun() [rank1]: ^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank29]: result = func(*args, **kwargs) [rank29]: ^^^^^^^^^^^^^^^^^^^^^ [rank8]: raise CheckpointingException(_msg) [rank21]: re[rank18]: Traceback (most recent call last): [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank18]: pretrain( [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain [rank18]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer [rank18]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( [rank18]: ^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint [rank1]: result = func(*args, **kwargs) [rank1]: [rank5]: Traceback (most recent call last): [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank5]: pretrain( [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank29]: local_plan = planner.create_local_plan() [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank29]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: raise CheckpointingException(_msg) [rank29]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096))[rank25]: Traceback (most recent call last): [rank8]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank8]: Traceback (most recent call last): (RANK 8) [rank8]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank8]: local_data = map_fun() [rank8]: ^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank8]: result = func(*args, **kwargs) [rank8]: ^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/j[rank11]: Traceback (most recent call last): [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank18]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( [rank18]: ^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint [rank18]: return _load_global_dist_base_checkpoint( [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint [rank18]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain [rank5]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer [rank5]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( [rank5]: ^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint [rank5]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( [rank5]: ^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank25]: pretrain( [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain [rank25]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer [rank25]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( [rank25]: ^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint [rank25]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( [rank11]: pretrain( [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain [rank11]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer [rank11]: args.iteration, 
args.num_floating_point_operations_so_far = load_checkpoint( [rank11]: ^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint [rank11]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( [rank11]: ^^^^^^^^^^^^^^^^^^^^^^ [rank18]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load [rank18]: checkpoint.load_state_dict( [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper [rank18]: return arg(*args, **kwargs) [rank18]: ^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict [rank18]: return _load_state_dict( [rank18]: ^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint [rank5]: return _load_global_dist_base_checkpoint( [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint [rank5]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load [rank5]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: ^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint [rank25]: return _load_global_dist_base_checkpoint( [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint [rank25]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load [rank25]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint [rank11]: return _load_global_dist_base_checkpoint( [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint [rank11]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) [rank11]: 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load [rank11]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict [rank18]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load [rank5]: checkpoint.load_state_dict( [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper [rank5]: return arg(*args, **kwargs) [rank5]: ^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict [rank5]: return _load_state_dict( [rank5]: ^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict [rank5]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load [rank25]: checkpoint.load_state_dict( [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper [rank25]: return arg(*args, **kwargs) [rank25]: ^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict [rank25]: return _load_state_dict( [rank25]: ^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load [rank11]: checkpoint.load_state_dict( [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper [rank11]: return arg(*args, **kwargs) [rank11]: ^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict [rank11]: return _load_state_dict( [rank11]: ^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict [rank11]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter [rank18]: raise result [rank18]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]) [rank18]: Traceback (most recent call last): (RANK 0) [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank18]: local_data = map_fun() [rank18]: ^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter [rank5]: raise result [rank5]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]) [rank5]: Traceback (most recent call last): (RANK 0) [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank5]: local_data = map_fun() [rank5]: ^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank25]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: re tensor for key embedding.position_embeddings.weight [rank19]: Traceback (most recent call last): (RANK 11) [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank19]: local_data = map_fun() [rank19]: ^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank19]: result = func(*args, **kwargs) [rank19]: ^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank19]: local_plan = planner.create_local_plan() [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: result = func(*args, **kwargs) [rank5]: [rank2]: Traceback (most recent call last): [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank2]: pretrain( [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter [rank25]: raise result [rank25]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]) [rank25]: Traceback (most recent call last): (RANK 0) 
[rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank25]: local_data = map_fun() [rank25]: ^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter [rank11]: raise result [rank11]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]) [rank11]: Traceback (most recent call last): (RANK 0) [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: local_data = map_fun() [rank11]: ^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank11]: re[rank10]: Traceback (most recent call last): [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank19]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank19]: raise CheckpointingException(_msg) [rank19]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank19]: Traceback (most recent call last): (RANK 12) [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank19]: local_data = map_fun() [rank19]: ^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain [rank2]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer [rank2]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( [rank2]: ^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint [rank2]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( [rank2]: ^^^^^^^^^^^^^^^^^^^^^^ [rank25]: re[rank24]: Traceback (most recent call last): [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank24]: pretrain( [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain [rank24]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in 
setup_model_and_optimizer [rank24]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( [rank24]: ^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank10]: pretrain( [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain [rank10]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( [rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer [rank10]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( [rank10]: ^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint [rank10]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank19]: result = func(*args, **kwargs) [rank19]: ^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank19]: local_plan = planner.create_local_plan() [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank19]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank19]: raise CheckpointingException(_msg) [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint [rank2]: return _load_global_dist_base_checkpoint( [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint [rank2]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load [rank2]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( [rank24]: ^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint [rank24]: return _load_global_dist_base_checkpoint( [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint [rank24]: state_dict = 
[rank7]: Traceback (most recent call last):
[rank7]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
[rank7]:     pretrain(
[rank7]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain
[rank7]:     model, optimizer, opt_param_scheduler = setup_model_and_optimizer(
[rank7]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer
[rank7]:     args.iteration, args.num_floating_point_operations_so_far = load_checkpoint(
[rank7]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint
[rank7]:     state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint(
[rank7]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint
[rank7]:     return _load_global_dist_base_checkpoint(
[rank7]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint
[rank7]:     state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness)
[rank7]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load
[rank7]:     loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir)
[rank7]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load
[rank7]:     checkpoint.load_state_dict(
[rank7]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
[rank7]:     return arg(*args, **kwargs)
[rank7]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict
[rank7]:     return _load_state_dict(
[rank7]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict
[rank7]:     central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step)
[rank7]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter
[rank7]:     raise result
[rank7]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31])
[rank7]: Traceback (most recent call last): (RANK 0)
[rank7]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter
[rank7]:     local_data = map_fun()
[rank7]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper
[rank7]:     result = func(*args, **kwargs)
[rank7]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step
[rank7]:     local_plan = planner.create_local_plan()
[rank7]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan
[rank7]:     self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors)
[rank7]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes
[rank7]:     raise CheckpointingException(_msg)
[rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight
[the nested "Traceback (most recent call last): (RANK N)" block repeats with the identical CheckpointingException for RANK 0 through RANK 31, and the same top-level traceback is emitted, interleaved, by every process rank0-rank31]
^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointisult = func(*args, **kwargs) [rank25]: ^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank25]: local_plan = planner.create_local_plan() [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank25]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank25]: raise CheckpointingException(_msg) [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank8]: local_plan = planner.create_local_plan() [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank8]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank8]: raise CheckpointingException(_msg) [rank8]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank8]: Traceback (most recent call last): (RANK 10) [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict [rank23]: return _load_state_dict( [rank23]: ^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict [rank23]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter [rank23]: raise result [rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank0]: raise CheckpointingException(_msg) [rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank0]: Traceback (most recent call last): (RANK 2) [rank0]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank0]: local_data = map_fun() [rank0]: ^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank25]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank25]: Traceback (most recent call last): (RANK 1) [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank25]: local_data = map_fun() [rank25]: ^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank25]: result = func(*args, **kwargs) [rank25]: ^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank25]: local_plan = planner.create_local_plan() [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank8]: local_data = map_fun() [rank8]: ^^^^^^^^^ [rank23]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]) [rank23]: Traceback (most recent call last): (RANK 0) [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank23]: local_data = map_fun() [rank23]: ^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank23]: re[rank16]: Traceback (most recent call last): [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank16]: pretrain( [rank0]: result = func(*args, **kwargs) [rank0]: ^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank0]: local_plan = planner.create_local_plan() [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank0]: raise CheckpointingException(_msg) [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank25]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank25]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank25]: raise CheckpointingException(_msg) [rank25]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank25]: Traceback (most recent call last): (RANK 2) [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank25]: local_data = map_fun() [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank8]: result = func(*args, **kwargs) [rank8]: ^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank8]: local_plan = planner.create_local_plan() [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank8]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank8]: raise CheckpointingException(_msg) [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain [rank16]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer [rank16]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( [rank16]: ^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint [rank16]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( [rank16]: ^^^^^^^^^^^^^^^^^^^^^^ [rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank0]: Traceback (most recent call last): (RANK 3) ^^^^^^^^^^^^^^^^^^^^^ [rank25]: ^^^^^^^^^ [rank8]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank8]: Traceback (most recent call last): (RANK 11) [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.1sult = func(*args, **kwargs) [rank11]: ^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank11]: local_plan = planner.create_local_plan() [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank11]: 
self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint [rank16]: return _load_global_dist_base_checkpoint( [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint [rank16]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load [rank16]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank5]: local_plan = planner.create_local_plan() [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank5]: raise CheckpointingException(_msg) [rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank5]: Traceback (most recent call last): (RANK 1) [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank25]: result = func(*args, **kwargs) [rank25]: ^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank25]: local_plan = planner.create_local_plan() [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank25]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank25]: raise CheckpointingException(_msg) [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank11]: raise CheckpointingException(_msg) [rank11]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank11]: Traceback (most recent call last): (RANK 1) [rank11]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: local_data = map_fun() [rank11]: ^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank11]: result = func(*args, **kwargs) [rank11]: ^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load [rank16]: checkpoint.load_state_dict( [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper [rank16]: return arg(*args, **kwargs) [rank16]: ^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank5]: local_data = map_fun() [rank5]: ^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank5]: result = func(*args, **kwargs) [rank5]: ^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank5]: local_plan = planner.create_local_plan() [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank25]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embeddingsult = func(*args, **kwargs) [rank24]: ^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank24]: local_plan = planner.create_local_plan() [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank24]: raise CheckpointingException(_msg) [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank11]: local_plan = planner.create_local_plan() [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank11]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank11]: raise CheckpointingException(_msg) [rank11]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded 
(torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict [rank16]: return _load_state_dict( [rank16]: ^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict [rank16]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter [rank16]: raise result [rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank5]: raise CheckpointingException(_msg) [rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank5]: Traceback (most recent call last): (RANK 2) [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank5]: local_data = map_fun() [rank5]: ^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank24]: Traceback (most recent call last): (RANK 1) [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank24]: local_data = map_fun() [rank24]: ^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: result = func(*args, **kwargs) [rank24]: ^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank24]: local_plan = planner.create_local_plan() [rank11]: Traceback (most recent call last): (RANK 2) [rank16]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]) [rank16]: Traceback (most recent call last): (RANK 0) [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank16]: local_data = map_fun() [rank16]: ^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper 
[rank16]: re.position_embeddings.weight [rank21]: Traceback (most recent call last): (RANK 3) [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank21]: local_data = map_fun() [rank5]: result = func(*args, **kwargs) [rank5]: ^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank5]: local_plan = planner.create_local_plan() [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank5]: raise CheckpointingException(_msg) [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank24]: raise CheckpointingException(_msg) [rank24]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank24]: Traceback (most recent call last): (RANK 2) [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank24]: local_data = map_fun() [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: local_data = map_fun() [rank11]: ^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank11]: result = func(*args, **kwargs) [rank11]: ^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank11]: local_plan = planner.create_local_plan() [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank21]: ^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank21]: result = func(*args, **kwargs) [rank21]: ^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank21]: local_plan = planner.create_local_plan() [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank21]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank5]: Traceback (most recent call last): (RANK 3) ^^^^^^^^^^^^^^^^^^^^^ [rank24]: ^^^^^^^^^ [rank11]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank11]: raise CheckpointingException(_msg) [rank11]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding ^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank9]: local_plan = planner.create_local_plan() [rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank21]: raise CheckpointingException(_msg) [rank21]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank21]: Traceback (most recent call last): (RANK 4) [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank21]: local_data = map_fun() [rank21]: ^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank21]: result = func(*args, **kwargs) [rank21]: ^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank2]: local_plan = planner.create_local_plan() [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank2]: raise CheckpointingException(_msg) [rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank2]: Traceback (most recent 
call last): (RANK 1) [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: result = func(*args, **kwargs) [rank24]: ^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank24]: local_plan = planner.create_local_plan() [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank24]: raise CheckpointingException(_msg) [rank9]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank9]: raise CheckpointingException(_msg) [rank9]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank9]: Traceback (most recent call last): (RANK 1) [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank9]: local_data = map_fun() [rank9]: ^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank21]: local_plan = planner.create_local_plan() [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank2]: local_data = map_fun() [rank2]: ^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank2]: result = func(*args, **kwargs) [rank2]: ^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank2]: local_plan = planner.create_local_plan() [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embeddingsult = func(*args, **kwargs) [rank28]: ^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank28]: local_plan = planner.create_local_plan() [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank28]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank28]: raise CheckpointingException(_msg) [rank9]: result = func(*args, **kwargs) [rank9]: ^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank9]: local_plan = planner.create_local_plan() [rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank9]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank9]: raise CheckpointingException(_msg) [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank21]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank21]: raise CheckpointingException(_msg) [rank21]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank21]: Traceback (most recent call last): (RANK 5) [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank21]: local_data = map_fun() [rank21]: ^^^^^^^^^ [rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank2]: raise CheckpointingException(_msg) [rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank2]: Traceback (most recent call last): (RANK 2) [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank2]: local_data = map_fun() [rank2]: ^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank28]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank28]: Traceback (most recent call last): (RANK 1) [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in 
reduce_scatter [rank28]: local_data = map_fun() [rank28]: ^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank28]: result = func(*args, **kwargs) [rank28]: ^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank28]: local_plan = planner.create_local_plan() [rank9]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank9]: Traceback (most recent call last): (RANK 2) [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank21]: result = func(*args, **kwargs) [rank21]: ^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank21]: local_plan = planner.create_local_plan() [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", linsult = func(*args, **kwargs) [rank17]: ^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank17]: local_plan = planner.create_local_plan() [rank2]: result = func(*args, **kwargs) [rank2]: ^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank2]: local_plan = planner.create_local_plan() [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank2]: raise CheckpointingException(_msg) [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank28]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank28]: raise CheckpointingException(_msg) [rank28]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank28]: Traceback (most recent call last): (RANK 2) [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank28]: local_data = map_fun() [rank9]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank9]: local_data = map_fun() [rank9]: ^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank9]: result = func(*args, **kwargs) [rank9]: ^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank9]: local_plan = planner.create_local_plan() [rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank17]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank17]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank17]: raise CheckpointingException(_msg) [rank17]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank17]: Traceback (most recent call last): (RANK 1) [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank17]: local_data = map_fun() [rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank2]: Traceback (most recent call last): (RANK 3) [rank28]: ^^^^^^^^^ [rank9]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank9]: raise CheckpointingException(_msg) [rank9]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank9]: Traceback (most recent call last): (RANK 3)e 605, in create_local_plan [rank12]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank12]: raise CheckpointingException(_msg) [rank17]: ^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank17]: result = func(*args, **kwargs) [rank17]: ^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank17]: local_plan = planner.create_local_plan() [rank17]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank17]: 
File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank17]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank2]: local_data = map_fun() [rank2]: ^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank2]: result = func(*args, **kwargs) [rank2]: ^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank2]: local_plan = planner.create_local_plan() [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank28]: result = func(*args, **kwargs) [rank28]: ^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank28]: local_plan = planner.create_local_plan() [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank28]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank28]: raise CheckpointingException(_msg) [rank12]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank12]: Traceback (most recent call last): (RANK 6) [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank12]: local_data = map_fun() [rank12]: ^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank12]: result = func(*args, **kwargs) [rank12]: ^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank12]: local_plan = planner.create_local_plan() [rank17]: raise CheckpointingException(_msg) [rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank2]: raise 
[rank31]: Traceback (most recent call last):
[rank31]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
[rank31]:     pretrain(
[rank31]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain
[rank31]:     model, optimizer, opt_param_scheduler = setup_model_and_optimizer(
[rank31]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer
[rank31]:     args.iteration, args.num_floating_point_operations_so_far = load_checkpoint(
[rank31]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint
[rank31]:     state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint(
[rank31]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint
[rank31]:     return _load_global_dist_base_checkpoint(
[rank31]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint
[rank31]:     state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness)
[rank31]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load
[rank31]:     loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir)
[rank31]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load
[rank31]:     checkpoint.load_state_dict(
[rank31]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
[rank31]:     return arg(*args, **kwargs)
[rank31]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict
[rank31]:     return _load_state_dict(
[rank31]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict
[rank31]:     central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step)
[rank31]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter
[rank31]:     raise result
[rank31]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31])
[rank31]: Traceback (most recent call last): (RANK 0)
[rank31]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter
[rank31]:     local_data = map_fun()
[rank31]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper
[rank31]:     result = func(*args, **kwargs)
[rank31]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step
[rank31]:     local_plan = planner.create_local_plan()
[rank31]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan
[rank31]:     self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors)
[rank31]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes
[rank31]:     raise CheckpointingException(_msg)
[rank31]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight
[... the embedded traceback above repeats verbatim for (RANK 1) through (RANK 31), and every rank in the job ([rank1]–[rank30] in this section, interleaved in the original output) prints the same pair of tracebacks ending in the same CheckpointingException ...]
[rank24]: Traceback (most recent call last): (RANK 3) [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank24]: local_data = map_fun() [rank24]: ^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: result = func(*args, **kwargs) [rank11]: ^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank11]: result = func(*args, **kwargs) [rank11]: ^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank11]: local_plan = planner.create_local_plan() [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank11]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank16]: local_data = map_fun() [rank16]: ^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank16]: result = func(*args, **kwargs) [rank16]: ^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank16]: local_plan = planner.create_local_plan() [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank16]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank1]: result = func(*args, **kwargs) [rank1]: ^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank24]: ^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank24]: local_plan = planner.create_local_plan() [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank24]: raise CheckpointingException(_msg) [rank24]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank11]: raise CheckpointingException(_msg) [rank11]: 
megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank11]: Traceback (most recent call last): (RANK 4) [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: local_data = map_fun() [rank11]: ^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank11]: result = func(*args, **kwargs) [rank11]: ^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank16]: raise CheckpointingException(_msg) [rank16]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank16]: Traceback (most recent call last): (RANK 2) [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank16]: local_data = map_fun() [rank16]: ^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank16]: result = func(*args, **kwargs) [rank16]: ^^^^^^^^^^^^^^^^^^^^^ [rank1]: local_plan = planner.create_local_plan() [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank1]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tenso [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank5]: local_data = map_fun() [rank5]: ^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank5]: result = func(*args, **kwargs) [rank5]: ^^^^^^^^^^^^^^^^^^^^^ [rank24]: Traceback (most recent call last): (RANK 4) [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank24]: local_data = map_fun() [rank24]: ^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: result = func(*args, **kwargs) [rank24]: ^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank24]: local_plan = planner.create_local_plan() [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank11]: local_plan = planner.create_local_plan() [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank11]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank11]: raise CheckpointingException(_msg) [rank11]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank11]: Traceback (most recent call last): (RANK 5) [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank16]: local_plan = planner.create_local_plan() [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank16]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank16]: raise CheckpointingException(_msg) [rank16]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank5]: local_plan = planner.create_local_plan() [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank5]: raise CheckpointingException(_msg) [rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank5]: Traceback (most recent call last): (RANK 4) [rank24]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: local_data = map_fun() [rank11]: ^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank11]: result = func(*args, **kwargs) [rank11]: ^^^^^^^^^^^^^^^^^^^^^ [rank11]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank11]: local_plan = planner.create_local_plan() [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", linrs) [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank9]: raise CheckpointingException(_msg) [rank22]: Traceback (most recent call last): (RANK 3) [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank22]: local_data = map_fun() [rank22]: ^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank22]: result = func(*args, **kwargs) [rank22]: ^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank22]: local_plan = planner.create_local_plan() [rank22]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank5]: local_data = map_fun() [rank5]: ^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank5]: result = func(*args, **kwargs) [rank5]: ^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank5]: local_plan = planner.create_local_plan() [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank24]: raise CheckpointingException(_msg) [rank24]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank24]: Traceback (most recent call last): (RANK 5) [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank24]: local_data = map_fun() [rank24]: ^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: result = func(*args, **kwargs) [rank24]: ^^^^^^^^^^^^^^^^^^^^^ [rank9]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key 
embedding.position_embeddings.weight [rank9]: Traceback (most recent call last): (RANK 6) [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank9]: local_data = map_fun() [rank9]: ^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank9]: result = func(*args, **kwargs) [rank9]: ^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank9]: local_plan = planner.create_local_plan() [rank22]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank22]: raise CheckpointingException(_msg) [rank22]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank22]: Traceback (most recent call last): (RANK 4) [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank22]: local_data = map_fun() [rank22]: ^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank5]: raise CheckpointingException(_msg) [rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank5]: Traceback (most recent call last): (RANK 5) [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank5]: local_data = map_fun() [rank5]: ^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank24]: local_plan = planner.create_local_plan() [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", lin.position_embeddings.weight [rank28]: Traceback (most recent call last): (RANK 3) [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank28]: local_data = map_fun() [rank28]: ^^^^^^^^^ [rank28]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank28]: result = func(*args, **kwargs) [rank28]: ^^^^^^^^^^^^^^^^^^^^^ [rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank9]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank9]: raise CheckpointingException(_msg) [rank9]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank9]: Traceback (most recent call last): (RANK 7) [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank9]: local_data = map_fun() [rank22]: result = func(*args, **kwargs) [rank22]: ^^^^^^^^^^^^^^^^^^^^^ [rank5]: result = func(*args, **kwargs) [rank5]: ^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank28]: local_plan = planner.create_local_plan() [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank28]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank28]: raise CheckpointingException(_msg) [rank28]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank9]: ^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank22]: local_plan = planner.create_local_plan() [rank22]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank22]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank22]: raise CheckpointingException(_msg) [rank22]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank5]: local_plan = planner.create_local_plan() [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tenso [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank7]: local_data = map_fun() [rank7]: ^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank7]: result = func(*args, **kwargs) [rank7]: ^^^^^^^^^^^^^^^^^^^^^ [rank28]: Traceback (most recent call last): (RANK 4) [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank28]: local_data = map_fun() [rank28]: ^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank28]: result = func(*args, **kwargs) [rank28]: ^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank28]: local_plan = planner.create_local_plan() [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank9]: result = func(*args, **kwargs) [rank9]: ^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank9]: local_plan = planner.create_local_plan() [rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank9]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank9]: raise CheckpointingException(_msg) [rank22]: Traceback (most recent call last): (RANK 5) [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank22]: local_data = map_fun() [rank22]: ^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank22]: result = func(*args, **kwargs) [rank22]: ^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank22]: local_plan = planner.create_local_plan() [rank22]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", lin.position_embeddings.weight 
[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank7]: local_plan = planner.create_local_plan() [rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank7]: raise CheckpointingException(_msg) [rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank7]: Traceback (most recent call last): (RANK 4) [rank28]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank9]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank9]: Traceback (most recent call last): (RANK 8) [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank9]: local_data = map_fun() [rank9]: ^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank9]: result = func(*args, **kwargs) [rank9]: ^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/jheckpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank8]: raise CheckpointingException(_msg) [rank18]: Traceback (most recent call last): (RANK 3) [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank18]: local_data = map_fun() [rank18]: ^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank18]: result = func(*args, **kwargs) [rank18]: ^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank18]: local_plan = planner.create_local_plan() [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank7]: local_data = map_fun() [rank7]: ^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank7]: result = func(*args, **kwargs) [rank7]: ^^^^^^^^^^^^^^^^^^^^^ [rank7]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank7]: local_plan = planner.create_local_plan() [rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank28]: raise CheckpointingException(_msg) [rank28]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank28]: Traceback (most recent call last): (RANK 5) [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank28]: local_data = map_fun() [rank28]: ^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank28]: result = func(*args, **kwargs) [rank28]: ^^^^^^^^^^^^^^^^^^^^^ [rank8]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank8]: Traceback (most recent call last): (RANK 14) [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank8]: local_data = map_fun() [rank8]: ^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank8]: result = func(*args, **kwargs) [rank8]: ^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank8]: local_plan = planner.create_local_plan() [rank18]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank18]: raise CheckpointingException(_msg) [rank18]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank18]: Traceback (most recent call last): (RANK 4) [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank18]: local_data = map_fun() [rank18]: ^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank7]: raise 
CheckpointingException(_msg) [rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank7]: Traceback (most recent call last): (RANK 5) [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank7]: local_data = map_fun() [rank7]: ^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank28]: local_plan = planner.create_local_plan() [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", linsult = func(*args, **kwargs) [rank31]: ^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank31]: local_plan = planner.create_local_plan() [rank31]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank8]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank8]: raise CheckpointingException(_msg) [rank8]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank8]: Traceback (most recent call last): (RANK 15) [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank8]: local_data = map_fun() [rank18]: result = func(*args, **kwargs) [rank18]: ^^^^^^^^^^^^^^^^^^^^^ [rank7]: result = func(*args, **kwargs) [rank7]: ^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank31]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank31]: raise CheckpointingException(_msg) [rank31]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank31]: Traceback (most recent call last): (RANK 1) [rank31]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank31]: local_data = map_fun() [rank31]: ^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank8]: ^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank18]: local_plan = planner.create_local_plan() [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank18]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank18]: raise CheckpointingException(_msg) [rank18]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank7]: local_plan = planner.create_local_plan() [rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensoheckpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank4]: raise CheckpointingException(_msg) [rank4]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank4]: Traceback (most recent call last): (RANK 14) [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank4]: local_data = map_fun() [rank4]: ^^^^^^^^^ [rank31]: result = func(*args, **kwargs) [rank31]: ^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank31]: local_plan = planner.create_local_plan() [rank31]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank31]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank31]: raise CheckpointingException(_msg) [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank8]: result = func(*args, **kwargs) [rank8]: ^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank8]: local_plan = planner.create_local_plan() [rank8]: 
^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank8]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank8]: raise CheckpointingException(_msg) [rank18]: Traceback (most recent call last): (RANK 5) [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank18]: local_data = map_fun() [rank18]: ^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank18]: result = func(*args, **kwargs) [rank18]: ^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank18]: local_plan = planner.create_local_plan() [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", lin.position_embeddings.weight [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank4]: result = func(*args, **kwargs) [rank4]: ^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank4]: local_plan = planner.create_local_plan() [rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank4]: raise CheckpointingException(_msg) [rank31]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank31]: Traceback (most recent call last): (RANK 2) [rank8]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank8]: Traceback (most recent call last): (RANK 16) [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank8]: local_data = map_fun() [rank8]: ^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank8]: result = func(*args, **kwargs) [rank8]: ^^^^^^^^^^^^^^^^^^^^^ [rank8]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_load.position_embeddings.weight [rank13]: Traceback (most recent call last): (RANK 3) [rank23]: Traceback (most recent call last): (RANK 3) [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank23]: local_data = map_fun() [rank23]: ^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank23]: result = func(*args, **kwargs) [rank23]: ^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank23]: local_plan = planner.create_local_plan() [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank4]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank4]: Traceback (most recent call last): (RANK 15) [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank4]: local_data = map_fun() [rank4]: ^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank4]: result = func(*args, **kwargs) [rank4]: ^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank4]: local_plan = planner.create_local_plan() [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank31]: local_data = map_fun() [rank31]: ^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank31]: result = func(*args, **kwargs) [rank31]: ^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank31]: local_plan = planner.create_local_plan() [rank31]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank13]: local_data = map_fun() [rank13]: ^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank13]: result = func(*args, **kwargs) [rank13]: ^^^^^^^^^^^^^^^^^^^^^ [rank13]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank13]: local_plan = planner.create_local_plan() [rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank23]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank23]: raise CheckpointingException(_msg) [rank23]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank23]: Traceback (most recent call last): (RANK 4) [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank23]: local_data = map_fun() [rank23]: ^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank4]: raise CheckpointingException(_msg) [rank4]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank4]: Traceback (most recent call last): (RANK 16) [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank4]: local_data = map_fun() [rank31]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank31]: raise CheckpointingException(_msg) [rank31]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embeddinge 605, in create_local_plan [rank25]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank25]: raise CheckpointingException(_msg) [rank13]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank13]: raise CheckpointingException(_msg) [rank13]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded 
(torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank13]: Traceback (most recent call last): (RANK 4) [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank13]: local_data = map_fun() [rank13]: ^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank23]: result = func(*args, **kwargs) [rank23]: ^^^^^^^^^^^^^^^^^^^^^ [rank4]: ^^^^^^^^^ [rank25]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank25]: Traceback (most recent call last): (RANK 6) [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank25]: local_data = map_fun() [rank25]: ^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank25]: result = func(*args, **kwargs) [rank25]: ^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank25]: local_plan = planner.create_local_plan() [rank13]: result = func(*args, **kwargs) [rank13]: ^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank23]: local_plan = planner.create_local_plan() [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank23]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank23]: raise CheckpointingException(_msg) [rank23]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank4]: result = func(*args, **kwargs) [rank4]: ^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_load [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank0]: local_data = map_fun() [rank0]: ^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank0]: result = func(*args, **kwargs) [rank0]: ^^^^^^^^^^^^^^^^^^^^^ [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank25]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank25]: raise CheckpointingException(_msg) [rank25]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank25]: Traceback (most recent call last): (RANK 7) [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank25]: local_data = map_fun() [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank13]: local_plan = planner.create_local_plan() [rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank13]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank13]: raise CheckpointingException(_msg) [rank13]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank23]: Traceback (most recent call last): (RANK 5) [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank23]: local_data = map_fun() [rank23]: ^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank23]: result = func(*args, **kwargs) [rank23]: ^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank23]: local_plan = planner.create_local_plan() [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank0]: local_plan = planner.create_local_plan() [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank0]: raise CheckpointingException(_msg) [rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key 
embedding.position_embeddings.weight [rank0]: Traceback (most recent call last): (RANK 4) [rank25]: ^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank25]: result = func(*args, **kwargs) [rank13]: Traceback (most recent call last): (RANK 5) [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank13]: local_data = map_fun() [rank13]: ^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank13]: result = func(*args, **kwargs) [rank13]: ^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank13]: local_plan = planner.create_local_plan() [rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", linng/strategies/torch.py", line 605, in create_local_plan [rank19]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank19]: raise CheckpointingException(_msg) [rank19]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank19]: Traceback (most recent call last): (RANK 14) [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank19]: local_data = map_fun() [rank19]: ^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank0]: local_data = map_fun() [rank0]: ^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank0]: result = func(*args, **kwargs) [rank0]: ^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank0]: local_plan = planner.create_local_plan() [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank25]: ^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank25]: local_plan = planner.create_local_plan() [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank25]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank25]: raise CheckpointingException(_msg) [rank25]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank11]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank11]: raise CheckpointingException(_msg) [rank11]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank11]: Traceback (most recent call last): (RANK 6) [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: local_data = map_fun() [rank11]: ^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank19]: result = func(*args, **kwargs) [rank19]: ^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank19]: local_plan = planner.create_local_plan() [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank19]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank19]: raise CheckpointingException(_msg) [rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank0]: raise CheckpointingException(_msg) [rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank0]: Traceback (most recent call last): (RANK 5) [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank0]: local_data = map_fun() [rank0]: ^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank25]: Traceback 
(most recent call last): (RANK 8) [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank25]: local_data = map_fun() [rank25]: ^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in .position_embeddings.weight [rank26]: Traceback (most recent call last): (RANK 3) [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank26]: local_data = map_fun() [rank26]: ^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank26]: result = func(*args, **kwargs) [rank11]: result = func(*args, **kwargs) [rank11]: ^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank11]: local_plan = planner.create_local_plan() [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank11]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank11]: raise CheckpointingException(_msg) [rank19]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank0]: result = func(*args, **kwargs) [rank0]: ^^^^^^^^^^^^^^^^^^^^^ [rank26]: ^^^^^^^^^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank26]: local_plan = planner.create_local_plan() [rank26]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank26]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank26]: raise CheckpointingException(_msg) [rank26]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank11]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank19]: Traceback (most recent call last): (RANK 15) [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank19]: local_data = map_fun() [rank19]: ^^^^^^^^^ [rank19]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank19]: result = func(*args, **kwargs) [rank19]: ^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank19]: local_plan = planner.create_local_plan() [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank0]: local_plan = planner.create_local_plan() [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank2]: raise CheckpointingException(_msg) [rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank2]: Traceback (most recent call last): (RANK 6) [rank26]: Traceback (most recent call last): (RANK 4) [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank26]: local_data = map_fun() [rank26]: ^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank26]: result = func(*args, **kwargs) [rank26]: ^^^^^^^^^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank26]: local_plan = planner.create_local_plan() [rank26]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank2]: local_data = map_fun() [rank2]: ^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank2]: result = func(*args, **kwargs) [rank2]: ^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank2]: local_plan = planner.create_local_plan() [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank26]: self._validate_global_shapes(self.metadata, 
self.shapes_validation_sharded_tensors) [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank26]: raise CheckpointingException(_msg) [rank26]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank26]: Traceback (most recent call last): (RANK 5) [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank26]: local_data = map_fun() [rank26]: ^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank2]: raise CheckpointingException(_msg) [rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank2]: Traceback (most recent call last): (RANK 7) [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank2]: local_data = map_fun() [rank2]: ^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank26]: result = func(*args, **kwargs) [rank26]: ^^^^^^^^^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank26]: local_plan = planner.create_local_plan() [rank26]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", lin.position_embeddings.weight [rank31]: Traceback (most recent call last): (RANK 3) [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank31]: local_data = map_fun() [rank31]: ^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank11]: Traceback (most recent call last): (RANK 7) [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: local_data = map_fun() [rank11]: ^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank11]: result = func(*args, **kwargs) [rank11]: ^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank11]: local_plan = 
planner.create_local_plan() [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank19]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank19]: raise CheckpointingException(_msg) [rank19]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank19]: Traceback (most recent call last): (RANK 16) [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank19]: local_data = map_fun() [rank19]: ^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/chece 605, in create_local_plan [rank2]: result = func(*args, **kwargs) [rank2]: ^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank2]: local_plan = planner.create_local_plan() [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank2]: raise CheckpointingException(_msg) [rank31]: result = func(*args, **kwargs) [rank31]: ^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank31]: local_plan = planner.create_local_plan() [rank31]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank31]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank31]: raise CheckpointingException(_msg) [rank11]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank11]: raise CheckpointingException(_msg) [rank11]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank11]: Traceback (most recent call last): (RANK 8) [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: local_data = map_fun() [rank11]: ^^^^^^^^^ 
[rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in .position_embeddings.weight [rank18]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank18]: raise CheckpointingException(_msg) [rank18]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank18]: Traceback (most recent call last): (RANK 6) [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank18]: local_data = map_fun() [rank18]: ^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank2]: Traceback (most recent call last): (RANK 8) [rank31]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank31]: Traceback (most recent call last): (RANK 4) [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank31]: local_data = map_fun() [rank31]: ^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank31]: result = func(*args, **kwargs) [rank31]: ^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank31]: local_plan = planner.create_local_plan() [rank15]: Traceback (most recent call last): (RANK 3) [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank15]: local_data = map_fun() [rank15]: ^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank15]: result = func(*args, **kwargs) [rank15]: ^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank15]: local_plan = planner.create_local_plan() [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank18]: result = func(*args, **kwargs) [rank18]: ^^^^^^^^^^^^^^^^^^^^^ [rank18]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank18]: local_plan = planner.create_local_plan() [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank18]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank18]: raise CheckpointingException(_msg) [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank2]: local_data = map_fun() [rank2]: ^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank2]: result = func(*args, **kwargs) [rank2]: ^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/j [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank3]: local_data = map_fun() [rank3]: ^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank31]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank15]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank15]: raise CheckpointingException(_msg) [rank15]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank15]: Traceback (most recent call last): (RANK 4) [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank15]: local_data = map_fun() [rank15]: ^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank18]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank3]: result = func(*args, **kwargs) [rank3]: ^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank3]: local_plan = planner.create_local_plan() [rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank3]: self._validate_global_shapes(self.metadata, 
self.shapes_validation_sharded_tensors) [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank3]: raise CheckpointingException(_msg) [rank31]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank31]: raise CheckpointingException(_msg) [rank31]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank31]: Traceback (most recent call last): (RANK 5) [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank31]: local_data = map_fun() [rank31]: ^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank15]: result = func(*args, **kwargs) [rank15]: ^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank15]: local_plan = planner.create_local_plan() [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank15]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank15]: raise CheckpointingException(_msg) [rank18]: Traceback (most recent call last): (RANK 7) [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank18]: local_data = map_fun() [rank18]: ^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank18]: result = func(*args, **kwargs) [rank18]: ^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank18]: local_plan = planner.create_local_plan() [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank3]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank3]: Traceback (most recent call last): (RANK 4) [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank3]: local_data = map_fun() [rank3]: ^^^^^^^^^ [rank3]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank3]: result = func(*args, **kwargs) [rank3]: ^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank3]: local_plan = planner.create_local_plan() [rank31]: result = func(*args, **kwargs) [rank31]: ^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank31]: local_plan = planner.create_local_plan() [rank31]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", linkpoint/logger.py", line 87, in wrapper [rank29]: result = func(*args, **kwargs) [rank29]: ^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank29]: local_plan = planner.create_local_plan() [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank15]: Traceback (most recent call last): (RANK 5) [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank15]: local_data = map_fun() [rank15]: ^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank15]: result = func(*args, **kwargs) [rank15]: ^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank15]: local_plan = planner.create_local_plan() [rank18]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank18]: raise CheckpointingException(_msg) [rank18]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank18]: Traceback (most recent call last): (RANK 8) [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank18]: local_data = map_fun() [rank18]: ^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in e 605, in create_local_plan [rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank3]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) 
[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank3]: raise CheckpointingException(_msg) [rank3]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank3]: Traceback (most recent call last): (RANK 5) [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank3]: local_data = map_fun() [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank29]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: raise CheckpointingException(_msg) [rank29]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank29]: Traceback (most recent call last): (RANK 17) [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank29]: local_data = map_fun() [rank29]: ^^^^^^^^^ [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", liner.py", line 223, in local_step [rank8]: local_plan = planner.create_local_plan() [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank8]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank8]: raise CheckpointingException(_msg) [rank8]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank17]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank17]: raise CheckpointingException(_msg) [rank17]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank17]: Traceback (most recent call last): (RANK 6) [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank17]: local_data = map_fun() [rank17]: ^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank3]: ^^^^^^^^^ [rank3]: 
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank3]: result = func(*args, **kwargs) [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank29]: result = func(*args, **kwargs) [rank29]: ^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank29]: local_plan = planner.create_local_plan() [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank29]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: raise CheckpointingException(_msg) [rank8]: Traceback (most recent call last): (RANK 17) [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank8]: local_data = map_fun() [rank8]: ^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank8]: result = func(*args, **kwargs) [rank8]: ^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank8]: local_plan = planner.create_local_plan() [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank17]: result = func(*args, **kwargs) [rank17]: ^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank17]: local_plan = planner.create_local_plan() [rank17]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank17]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank17]: raise CheckpointingException(_msg) [rank3]: ^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank3]: local_plan = planner.create_local_plan() [rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank3]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank1]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank1]: raise CheckpointingException(_msg) [rank1]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank29]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank29]: Traceback (most recent call last): (RANK 18) [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank29]: local_data = map_fun() [rank29]: ^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank29]: result = func(*args, **kwargs) [rank29]: ^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank29]: local_plan = planner.create_local_plan() [rank8]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank17]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank1]: Traceback (most recent call last): (RANK 6) [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank1]: local_data = map_fun() [rank1]: ^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank1]: result = func(*args, **kwargs) [rank1]: ^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank1]: local_plan = planner.create_local_plan() [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank29]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: raise CheckpointingException(_msg) [rank29]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096e 605, in create_local_plan [rank24]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank24]: raise 
CheckpointingException(_msg) [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank8]: raise CheckpointingException(_msg) [rank8]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank8]: Traceback (most recent call last): (RANK 18) [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank8]: local_data = map_fun() [rank8]: ^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank8]: result = func(*args, **kwargs) [rank8]: ^^^^^^^^^^^^^^^^^^^^^ [rank17]: Traceback (most recent call last): (RANK 7) [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank17]: local_data = map_fun() [rank17]: ^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank17]: result = func(*args, **kwargs) [rank17]: ^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank17]: local_plan = planner.create_local_plan() [rank17]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank1]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank1]: raise CheckpointingException(_msg) [rank1]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank1]: Traceback (most recent call last): (RANK 7) [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank1]: local_data = map_fun() [rank1]: ^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank24]: Traceback (most recent call last): (RANK 6) [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank24]: local_data = map_fun() [rank24]: ^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: result = 
[all ranks] Checkpoint load fails identically on every rank appearing in this excerpt (ranks 0, 1, 4, 5, 7, 8, 10, 12, 13, 15, 16, 17, 18, 23, 24, 25, 26, 28, 30, 31); the interleaved per-rank output reduces to the following traceback:
[rank0]: Traceback (most recent call last):
[rank0]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter
[rank0]:     local_data = map_fun()
[rank0]:                  ^^^^^^^^^
[rank0]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper
[rank0]:     result = func(*args, **kwargs)
[rank0]:              ^^^^^^^^^^^^^^^^^^^^^
[rank0]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step
[rank0]:     local_plan = planner.create_local_plan()
[rank0]:                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^
[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan
[rank0]:     self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors)
[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes
[rank0]:     raise CheckpointingException(_msg)
[rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank31]: result = func(*args, **kwargs) [rank31]: ^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank10]: local_data = map_fun() [rank10]: ^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank10]: result = func(*args, **kwargs) [rank10]: ^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank10]: local_plan = planner.create_local_plan() [rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank18]: raise CheckpointingException(_msg) [rank18]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank18]: Traceback (most recent call last): (RANK 13) [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank18]: local_data = map_fun() [rank18]: ^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank18]: result = func(*args, **kwargs) [rank18]: ^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank2]: raise CheckpointingException(_msg) [rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank2]: Traceback (most recent call last): (RANK 9) [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank2]: local_data = map_fun() [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank31]: local_plan = planner.create_local_plan() [rank31]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank31]: self._validate_global_shapes(self.metadata, 
self.shapes_validation_sharded_tensors) [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank31]: raise CheckpointingException(_msg) [rank31]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank10]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank10]: raise CheckpointingException(_msg) [rank10]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank10]: Traceback (most recent call last): (RANK 5) [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank10]: local_data = map_fun() [rank10]: ^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank18]: local_plan = planner.create_local_plan() [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointie 605, in create_local_plan [rank22]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank22]: raise CheckpointingException(_msg) [rank22]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank22]: Traceback (most recent call last): (RANK 6) [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank2]: ^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank2]: result = func(*args, **kwargs) [rank2]: ^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank2]: local_plan = planner.create_local_plan() [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank31]: Traceback (most recent call last): (RANK 7) [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank10]: result = 
func(*args, **kwargs) [rank10]: ^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank10]: local_plan = planner.create_local_plan() [rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", linwrapper [rank15]: result = func(*args, **kwargs) [rank15]: ^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank15]: local_plan = planner.create_local_plan() [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank22]: local_data = map_fun() [rank22]: ^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank22]: result = func(*args, **kwargs) [rank22]: ^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank22]: local_plan = planner.create_local_plan() [rank22]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank22]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank2]: raise CheckpointingException(_msg) [rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank2]: Traceback (most recent call last): (RANK 10) [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank2]: local_data = map_fun() [rank2]: ^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank2]: result = func(*args, **kwargs) [rank2]: ^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank31]: local_data = map_fun() [rank31]: ^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank31]: result = func(*args, **kwargs) [rank31]: ^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank31]: local_plan = planner.create_local_plan() [rank31]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank31]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan 
[rank15]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank15]: raise CheckpointingException(_msg) [rank15]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank15]: Traceback (most recent call last): (RANK 9) [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank15]: local_data = map_fun() [rank15]: ^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank2]: local_plan = planner.create_local_plan() [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank31]: raise CheckpointingException(_msg) [rank31]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank31]: Traceback (most recent call last): (RANK 8) [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank31]: local_data = map_fun() [rank31]: ^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in ])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank15]: result = func(*args, **kwargs) [rank22]: raise CheckpointingException(_msg) [rank22]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank22]: Traceback (most recent call last): (RANK 7) [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank22]: local_data = map_fun() [rank22]: ^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank22]: result = func(*args, **kwargs) [rank22]: ^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in 
_validate_global_shapes [rank2]: raise CheckpointingException(_msg) [rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank2]: Traceback (most recent call last): (RANK 11) [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.1k4]: local_data = map_fun() [rank4]: ^^^^^^^^^ [rank29]: Traceback (most recent call last): (RANK 19) [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank29]: local_data = map_fun() [rank29]: ^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank29]: result = func(*args, **kwargs) [rank29]: ^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank29]: local_plan = planner.create_local_plan() [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank15]: ^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank15]: local_plan = planner.create_local_plan() [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank15]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank15]: raise CheckpointingException(_msg) [rank15]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank22]: local_plan = planner.create_local_plan() [rank22]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank22]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank22]: raise CheckpointingException(_msg) [rank22]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank22]: Traceback (most recent call last): (RANK 8) [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank4]: 
result = func(*args, **kwargs) [rank4]: ^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank4]: local_plan = planner.create_local_plan() [rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank4]: raise CheckpointingException(_msg) [rank29]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: raise CheckpointingException(_msg) [rank29]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank29]: Traceback (most recent call last): (RANK 20) [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank29]: local_data = map_fun() [rank29]: ^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank15]: Traceback (most recent call last): (RANK 10) [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank15]: local_data = map_fun() [rank15]: ^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank15]: result = func(*args, **kwargs) [rank15]: ^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank15]: local_plan = planner.create_local_plan() [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank22]: local_data = map_fun() [rank22]: ^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank23]: result = func(*args, **kwargs) [rank23]: ^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank23]: local_plan = planner.create_local_plan() [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank23]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank4]: 
megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank4]: Traceback (most recent call last): (RANK 20) [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank4]: local_data = map_fun() [rank4]: ^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank4]: result = func(*args, **kwargs) [rank4]: ^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank4]: local_plan = planner.create_local_plan() [rank29]: result = func(*args, **kwargs) [rank29]: ^^^^^^^^^^^^^^^^^^^^^ [rank15]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank15]: raise CheckpointingException(_msg) [rank15]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank12]: Traceback (most recent call last): (RANK 11) [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank12]: local_data = map_fun() [rank12]: ^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank23]: raise CheckpointingException(_msg) [rank23]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank23]: Traceback (most recent call last): (RANK 9) [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank23]: local_data = map_fun() [rank23]: ^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank23]: result = func(*args, **kwargs) [rank23]: ^^^^^^^^^^^^^^^^^^^^^ [rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank4]: raise CheckpointingException(_msg) [rank4]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and 
expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank4]: Traceback (most recent call last): (RANK 21) [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank4]: local_data = map_fun() [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank29]: local_plan = planner.create_local_plan() [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank29]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: raise CheckpointingException(_msg) [rank29]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank12]: result = func(*args, **kwargs) [rank12]: ^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank12]: local_plan = planner.create_local_plan() [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank12]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank12]: raise CheckpointingException(_msg) [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank4]: ^^^^^^^^^ [rank29]: Traceback (most recent call last): (RANK 21) [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank29]: local_data = map_fun() [rank29]: ^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank29]: result = func(*args, **kwargs) [rank29]: ^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank29]: local_plan = planner.create_local_plan() [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatrowrapper [rank24]: result = func(*args, **kwargs) [rank12]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank12]: Traceback (most recent call last): (RANK 12) [rank12]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank12]: local_data = map_fun() [rank12]: ^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank12]: result = func(*args, **kwargs) [rank12]: ^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank12]: local_plan = planner.create_local_plan() [rank23]: local_plan = planner.create_local_plan() [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank23]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank23]: raise CheckpointingException(_msg) [rank23]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank23]: Traceback (most recent call last): (RANK 10) [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank4]: result = func(*args, **kwargs) [rank4]: ^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank4]: local_plan = planner.create_local_plan() [rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank24]: ^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank24]: local_plan = planner.create_local_plan() [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank24]: raise CheckpointingException(_msg) [rank24]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key 
embedding.position_embeddings.weight [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank12]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank12]: raise CheckpointingException(_msg) [rank12]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank12]: Traceback (most recent call last): (RANK 13) [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank12]: local_data = map_fun() [rank23]: local_data = map_fun() [rank23]: ^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank23]: result = func(*args, **kwargs) [rank23]: ^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank23]: local_plan = planner.create_local_plan() [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank23]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank4]: raiunda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank0]: local_plan = planner.create_local_plan() [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank0]: raise CheckpointingException(_msg) [rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank0]: Traceback (most recent call last): (RANK 9) [rank24]: Traceback (most recent call last): (RANK 9) [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank24]: local_data = map_fun() [rank24]: ^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: result = func(*args, **kwargs) [rank24]: ^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank24]: local_plan = planner.create_local_plan() [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ 
[rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank12]: ^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank12]: result = func(*args, **kwargs) [rank12]: ^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank12]: local_plan = planner.create_local_plan() [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointiunda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank9]: local_plan = planner.create_local_plan() [rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank23]: raise CheckpointingException(_msg) [rank23]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096))ng/strategies/torch.py", line 605, in create_local_plan [rank18]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank18]: raise CheckpointingException(_msg) [rank18]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank18]: Traceback (most recent call last): (RANK 14) [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank0]: local_data = map_fun() [rank0]: ^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank0]: result = func(*args, **kwargs) [rank0]: ^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank0]: local_plan = planner.create_local_plan() [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank9]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank9]: raise CheckpointingException(_msg) [rank9]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected 
((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank9]: Traceback (most recent call last): (RANK 9) [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank9]: local_data = map_fun() [rank9]: ^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank18]: local_data = map_fun() [rank18]: ^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank18]: result = func(*args, **kwargs) [rank18]: ^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank18]: local_plan = planner.create_local_plan() [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank0]: raise CheckpointingException(_msg) [rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank0]: Traceback (most recent call last): (RANK 10) [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank0]: local_data = map_fun() [rank0]: ^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank24]: raise CheckpointingException(_msg) [rank24]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank24]: Traceback (most recent call last): (RANK 10) [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank24]: local_data = map_fun() [rank24]: ^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: result = func(*args, **kwargs) [rank24]: ^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank18]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank0]: result = func(*args, **kwargs) [rank0]: ^^^^^^^^^^^^^^^^^^^^^ [rank24]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank24]: local_plan = planner.create_local_plan() [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank24]: raise CheckpointingException(_msg) [rank24]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096))wrapper [rank28]: result = func(*args, **kwargs) [rank28]: ^^^^^^^^^^^^^^^^^^^^^ [rank9]: result = func(*args, **kwargs) [rank9]: ^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank9]: local_plan = planner.create_local_plan() [rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank9]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank9]: raise CheckpointingException(_msg) [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank18]: raise CheckpointingException(_msg) [rank18]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank18]: Traceback (most recent call last): (RANK 15) [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank18]: local_data = map_fun() [rank18]: ^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank18]: result = func(*args, **kwargs) [rank18]: ^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank0]: local_plan = planner.create_local_plan() [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank0]: raise CheckpointingException(_msg) [rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 
4096)) tensor for key embedding.position_embeddings.weight
Every participating rank in this excerpt (ranks 0, 1, 2, 3, 5, 7, 9, 10, 13, 15, 16, 18, 19, 22, 23, 25, 26, 28, 29, and 30) fails checkpoint loading with the same CheckpointingException, repeated once per local load plan (markers (RANK 6) through (RANK 24)). Representative traceback (rank 28):
[rank28]: Traceback (most recent call last):
[rank28]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter
[rank28]:     local_data = map_fun()
[rank28]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper
[rank28]:     result = func(*args, **kwargs)
[rank28]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step
[rank28]:     local_plan = planner.create_local_plan()
[rank28]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan
[rank28]:     self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors)
[rank28]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes
[rank28]:     raise CheckpointingException(_msg)
[rank28]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight
= planner.create_local_plan() [rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank19]: ^^^^^^^^^ [rank3]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank3]: Traceback (most recent call last): (RANK 7) [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank3]: local_data = map_fun() [rank3]: ^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank3]: result = func(*args, **kwargs) [rank3]: ^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank3]: local_plan = planner.create_local_plan() [rank29]: ^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site tensor for key embedding.position_embeddings.weight [rank30]: Traceback (most recent call last): (RANK 11) [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank30]: local_data = map_fun() [rank30]: ^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank30]: result = func(*args, **kwargs) [rank30]: ^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank30]: local_plan = planner.create_local_plan() [rank10]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank10]: raise CheckpointingException(_msg) [rank10]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank10]: Traceback (most recent call last): (RANK 10) [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank10]: local_data = map_fun() [rank10]: ^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank19]: result = func(*args, **kwargs) [rank19]: ^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank19]: local_plan = 
planner.create_local_plan() [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank19]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank19]: raise CheckpointingException(_msg) [rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank30]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank30]: raise CheckpointingException(_msg) [rank30]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank30]: Traceback (most recent call last): (RANK 12) [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank30]: local_data = map_fun() [rank10]: result = func(*args, **kwargs) [rank10]: ^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank10]: local_plan = planner.create_local_plan() [rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank10]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank10]: raise CheckpointingException(_msg) [rank19]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank19]: Traceback (most recent call last): (RANK 18) [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank19]: local_data = map_fun() [rank19]: ^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank19]: result = func(*args, **kwargs) [rank19]: ^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank19]: local_plan = planner.create_local_plan() [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank3]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank3]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank3]: raise CheckpointingException(_msg) [rank3]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank3]: Traceback (most recent call last): (RANK 8) [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank3]: local_data = map_fun() [rank3]: ^^^^^^^^^ [rank30]: ^^^^^^^^^ [rank10]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096))ng/strategies/torch.py", line 605, in create_local_plan [rank15]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank15]: raise CheckpointingException(_msg) [rank15]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank15]: Traceback (most recent call last): (RANK 14) [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank15]: local_data = map_fun() [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank19]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank19]: raise CheckpointingException(_msg) [rank19]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096ng/strategies/torch.py", line 605, in create_local_plan [rank23]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank23]: raise CheckpointingException(_msg) [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank3]: result = func(*args, **kwargs) [rank3]: ^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/jse CheckpointingException(_msg) [rank4]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank4]: Traceback (most recent call last): (RANK 22) [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank4]: local_data = map_fun() [rank4]: ^^^^^^^^^ [rank30]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank30]: result = func(*args, **kwargs) [rank30]: ^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank30]: local_plan = planner.create_local_plan() [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank30]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank30]: raise CheckpointingException(_msg) [rank15]: ^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank15]: result = func(*args, **kwargs) [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank4]: result = func(*args, **kwargs) [rank4]: ^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank4]: local_plan = planner.create_local_plan() [rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank4]: raise CheckpointingException(_msg) [rank30]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank30]: Traceback (most recent call last): (RANK 13) [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank30]: local_data = map_fun() [rank30]: ^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank30]: result = func(*args, **kwargs) [rank30]: ^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank30]: local_plan = planner.create_local_plan() [rank15]: ^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank15]: local_plan = planner.create_local_plan() [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in 
create_local_plan [rank15]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank15]: raise CheckpointingException(_msg) [rank15]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank4]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank4]: Traceback (most recent call last): (RANK 23) [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank4]: local_data = map_fun() [rank4]: ^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank4]: result = func(*args, **kwargs) [rank4]: ^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank4]: local_plan = planner.create_local_plan() [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointi tensor for key embedding.position_embeddings.weight [rank24]: Traceback (most recent call last): (RANK 11) [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank24]: local_data = map_fun() [rank24]: ^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: result = func(*args, **kwargs) [rank24]: ^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank24]: local_plan = planner.create_local_plan() [rank15]: Traceback (most recent call last): (RANK 15) [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank15]: local_data = map_fun() [rank15]: ^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank15]: result = func(*args, **kwargs) [rank15]: ^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank15]: local_plan = planner.create_local_plan() [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank23]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) 
tensor for key embedding.position_embeddings.weight [rank23]: Traceback (most recent call last): (RANK 14) [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank23]: local_data = map_fun() [rank23]: ^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank23]: result = func(*args, **kwargs) [rank23]: ^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank23]: local_plan = planner.create_local_plan() [rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank24]: raise CheckpointingException(_msg) [rank24]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank24]: Traceback (most recent call last): (RANK 12) [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank24]: local_data = map_fun() [rank15]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank15]: raise CheckpointingException(_msg) [rank15]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank15]: Traceback (most recent call last): (RANK 16) [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank15]: local_data = map_fun() [rank15]: ^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checwrapper [rank11]: result = func(*args, **kwargs) [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank23]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank23]: raise CheckpointingException(_msg) [rank23]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 
4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank23]: Traceback (most recent call last): (RANK 15) [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank23]: local_data = map_fun() [rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank4]: raise CheckpointingException(_msg) [rank4]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank4]: Traceback (most recent call last): (RANK 24) [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank4]: local_data = map_fun() [rank4]: ^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: ^^^^^^^^^ [rank11]: ^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank11]: local_plan = planner.create_local_plan() [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank11]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank11]: raise CheckpointingException(_msg) [rank11]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank23]: ^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank23]: result = func(*args, **kwargs) [rank23]: ^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank23]: local_plan = planner.create_local_plan() [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank23]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank4]: result = func(*args, **kwargs) [rank4]: ^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank4]: local_plan = planner.create_local_plan() 
[r2/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank0]: local_data = map_fun() [rank0]: ^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank0]: result = func(*args, **kwargs) [rank0]: ^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank0]: local_plan = planner.create_local_plan() [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: result = func(*args, **kwargs) [rank24]: ^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank24]: local_plan = planner.create_local_plan() [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank24]: raise CheckpointingException(_msg) [rank11]: Traceback (most recent call last): (RANK 9) [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: local_data = map_fun() [rank11]: ^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank11]: result = func(*args, **kwargs) [rank11]: ^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank11]: local_plan = planner.create_local_plan() [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank23]: raise CheckpointingException(_msg) [rank23]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank23]: Traceback (most recent call last): (RANK 16) [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank23]: local_data = map_fun() [rank23]: ^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checwrapper [rank21]: result = func(*args, **kwargs) [rank21]: ^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank0]: raise CheckpointingException(_msg) [rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank0]: Traceback (most recent call last): (RANK 12) [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank0]: local_data = map_fun() [rank24]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank24]: Traceback (most recent call last): (RANK 13) [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank24]: local_data = map_fun() [rank24]: ^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: result = func(*args, **kwargs) [rank24]: ^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank24]: local_plan = planner.create_local_plan() [rank11]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank11]: raise CheckpointingException(_msg) [rank11]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank11]: Traceback (most recent call last): (RANK 10) [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: local_data = map_fun() [rank11]: ^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank21]: local_plan = planner.create_local_plan() [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank21]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank21]: raise CheckpointingException(_msg) [rank21]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor 
for key embedding.position_embeddings.weight [rank21]: Traceback (most recent call last): (RANK 9) [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank0]: ^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank0]: result = func(*args, **kwargs) [rank0]: ^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank0]: local_plan = planner.create_local_plan() [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointi-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank29]: result = func(*args, **kwargs) [rank29]: ^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank29]: local_plan = planner.create_local_plan() [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank29]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank21]: local_data = map_fun() [rank21]: ^^^^^^^^^ [rank0]: raise CheckpointingException(_msg) [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: raise CheckpointingException(_msg) [rank29]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank29]: Traceback (most recent call last): (RANK 25) [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank29]: local_data = map_fun() [rank29]: ^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank29]: result = func(*args, **kwargs) [rank29]: ^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank21]: result = func(*args, **kwargs) [rank21]: ^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank21]: local_plan = planner.create_local_plan() [rank21]: 
^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank21]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank21]: raise CheckpointingException(_msg) [rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank0]: Traceback (most recent call last): (RANK 13) [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank0]: local_data = map_fun() [rank0]: ^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank0]: result = func(*args, **kwargs) [rank0]: ^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank0]: local_plan = planner.create_local_plan() [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank11]: result = func(*args, **kwargs) [rank11]: ^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank11]: local_plan = planner.create_local_plan() [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank11]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank11]: raise CheckpointingException(_msg) [rank21]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank21]: Traceback (most recent call last): (RANK 10) [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank21]: local_data = map_fun() [rank21]: ^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank21]: result = func(*args, **kwargs) [rank21]: ^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank21]: local_plan = planner.create_local_plan() [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_c2/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank5]: local_data = map_fun() [rank5]: ^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank5]: result = func(*args, **kwargs) [rank5]: ^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank29]: local_plan = planner.create_local_plan() [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank29]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: raise CheckpointingException(_msg) [rank29]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank29]: Traceback (most recent call last): (RANK 26) [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096))heckpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank9]: raise CheckpointingException(_msg) [rank9]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank9]: Traceback (most recent call last): (RANK 14) [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank9]: local_data = map_fun() [rank9]: ^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank9]: result = func(*args, **kwargs) [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank21]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank21]: raise CheckpointingException(_msg) [rank21]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096))])) and expected ((98304, 4096)) tensor for key 
embedding.position_embeddings.weight [rank19]: Traceback (most recent call last): (RANK 19) [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank5]: local_plan = planner.create_local_plan() [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank5]: raise CheckpointingException(_msg) [rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank5]: Traceback (most recent call last): (RANK 12) [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank29]: local_data = map_fun() [rank29]: ^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank29]: result = func(*args, **kwargs) [rank29]: ^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank29]: local_plan = planner.create_local_plan() [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank29]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank9]: ^^^^^^^^^^^^^^^^^^^^^ [rank19]: local_data = map_fun() [rank19]: ^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank19]: result = func(*args, **kwargs) [rank19]: ^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank19]: local_plan = planner.create_local_plan() [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank19]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank5]: local_data = map_fun() [rank5]: ^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank5]: result = func(*args, **kwargs) [rank5]: ^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank5]: local_plan = planner.create_local_plan() [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: raise CheckpointingException(_msg) [rank29]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch fo tensor for key embedding.position_embeddings.weight [rank28]: Traceback (most recent call last): (RANK 11) [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank28]: local_data = map_fun() [rank28]: ^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank28]: result = func(*args, **kwargs) [rank28]: ^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank9]: local_plan = planner.create_local_plan() [rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank9]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank9]: raise CheckpointingException(_msg) [rank9]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank9]: Traceback (most recent call last): (RANK 15) [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank5]: raise CheckpointingException(_msg) [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank28]: local_plan = planner.create_local_plan() [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank28]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank28]: raise CheckpointingException(_msg) [rank28]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in 
reduce_scatter [rank9]: local_data = map_fun() [rank9]: ^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank9]: result = func(*args, **kwargs) [rank9]: ^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank9]: local_plan = planner.create_local_plan() [rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank19]: raise CheckpointingException(_msg) [rank19]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank19]: Traceback (most recent call last): (RANK 20) [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank19]: local_data = map_fun() [rank19]: ^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank19]: result = func(*args, **kwargs) [rank19]: ^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank5]: Traceback (most recent call last): (RANK 13) [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank5]: local_data = map_fun() [rank5]: ^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank5]: result = func(*args, **kwargs) [rank5]: ^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank5]: local_plan = planner.create_local_plan() [rank28]: Traceback (most recent call last): (RANK 12) [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank28]: local_data = map_fun() [rank9]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank9]: raise CheckpointingException(_msg) [rank9]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank9]: Traceback (most recent call last): (RANK 16) [rank9]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank9]: local_data = map_fun() [rank9]: ^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank19]: local_plan = planner.create_local_plan() [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank19]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank19]: raise CheckpointingException(_msg) [rank19]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank19]: Traceback (most recent call last): (RANK 21) [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank2]: raise CheckpointingException(_msg) [rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank2]: Traceback (most recent call last): (RANK 14) [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank2]: local_data = map_fun() [rank28]: ^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank28]: result = func(*args, **kwargs) [rank28]: ^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank28]: local_plan = planner.create_local_plan() [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank28]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank9]: result = func(*args, **kwargs) [rank9]: ^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_load tensor for key embedding.position_embeddings.weight [rank10]: 
Traceback (most recent call last): (RANK 11) [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank10]: local_data = map_fun() [rank10]: ^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank10]: result = func(*args, **kwargs) [rank10]: ^^^^^^^^^^^^^^^^^^^^^ [rank19]: local_data = map_fun() [rank19]: ^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank19]: result = func(*args, **kwargs) [rank19]: ^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank19]: local_plan = planner.create_local_plan() [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatro tensor for key embedding.position_embeddings.weight [rank17]: Traceback (most recent call last): (RANK 11) [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank17]: local_data = map_fun() [rank2]: ^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank2]: result = func(*args, **kwargs) [rank2]: ^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank2]: local_plan = planner.create_local_plan() [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank28]: raise CheckpointingException(_msg) [rank28]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank28]: Traceback (most recent call last): (RANK 13) [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank28]: local_data = map_fun() [rank28]: ^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank28]: result = func(*args, **kwargs) [rank28]: ^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step 
[rank10]: local_plan = planner.create_local_plan() [rank17]: ^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank17]: result = func(*args, **kwargs) [rank17]: ^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank17]: local_plan = planner.create_local_plan() [rank17]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank17]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank2]: raise CheckpointingException(_msg) [rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank2]: Traceback (most recent call last): (RANK 15) [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank2]: local_data = map_fun() [rank2]: ^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank2]: result = func(*args, **kwargs) [rank2]: ^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank28]: local_plan = planner.create_local_plan() [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank24]: raise CheckpointingException(_msg) [rank24]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank24]: Traceback (most recent call last): (RANK 14) [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank10]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank10]: raise CheckpointingException(_msg) [rank10]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) 
and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank10]: Traceback (most recent call last): (RANK 12) [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank10]: local_data = map_fun() [rank17]: raise CheckpointingException(_msg) [rank17]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank17]: Traceback (most recent call last): (RANK 12) [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank17]: local_data = map_fun() [rank17]: ^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank17]: result = func(*args, **kwargs) [rank17]: ^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank2]: local_plan = planner.create_local_plan() [rank24]: local_data = map_fun() [rank24]: ^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: result = func(*args, **kwargs) [rank24]: ^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank24]: local_plan = planner.create_local_plan() [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank10]: ^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank10]: result = func(*args, **kwargs) [rank10]: ^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank10]: local_plan = planner.create_local_plan() [rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank10]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank17]: local_plan = planner.create_local_plan() [rank17]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank17]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank17]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank17]: raise CheckpointingException(_msg) [rank17]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank17]: Traceback (most recent call last): (RANK 13) [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank2]: raise CheckpointingException(_msg) [rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank2]: Traceback (most recent call last): (RANK 16) [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank2]: local_data = map_fun() [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank10]: raise CheckpointingException(_msg) [rank10]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank10]: Traceback (most recent call last): (RANK 13) [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank10]: local_data = map_fun() [rank10]: ^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank10]: result = func(*args, **kwargs) [rank10]: ^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank17]: local_data = map_fun() [rank17]: ^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank17]: result = func(*args, **kwargs) [rank17]: ^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank17]: local_plan = planner.create_local_plan() [rank17]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointi tensor for key embedding.position_embeddings.weight [rank21]: Traceback (most recent call last): (RANK 11) [rank21]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank2]: ^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank2]: result = func(*args, **kwargs) [rank2]: ^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_load2/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank7]: local_data = map_fun() [rank7]: ^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank7]: result = func(*args, **kwargs) [rank7]: ^^^^^^^^^^^^^^^^^^^^^ [rank24]: raise CheckpointingException(_msg) [rank24]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank24]: Traceback (most recent call last): (RANK 15) [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank24]: local_data = map_fun() [rank24]: ^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: result = func(*args, **kwargs) [rank24]: ^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank10]: local_plan = planner.create_local_plan() [rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointikpoint/logger.py", line 87, in wrapper [rank15]: result = func(*args, **kwargs) [rank15]: ^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank15]: local_plan = planner.create_local_plan() [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank15]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank21]: local_data = map_fun() [rank21]: ^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank21]: result = func(*args, **kwargs) [rank21]: ^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank21]: local_plan = planner.create_local_plan() [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank21]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank7]: 
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank7]: local_plan = planner.create_local_plan() [rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank7]: raise CheckpointingException(_msg) [rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank7]: Traceback (most recent call last): (RANK 12) [rank24]: local_plan = planner.create_local_plan() [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank24]: raise CheckpointingException(_msg) [rank24]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank24]: Traceback (most recent call last): (RANK 16) [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank7]: local_data = map_fun() [rank7]: ^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank7]: result = func(*args, **kwargs) [rank7]: ^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank7]: local_plan = planner.create_local_plan() [rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: local_data = map_fun() [rank24]: ^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/chec tensor for key embedding.position_embeddings.weight [rank26]: Traceback (most recent call last): (RANK 11) [rank26]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank26]: local_data = map_fun() [rank26]: ^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank26]: result = func(*args, **kwargs) [rank26]: ^^^^^^^^^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank15]: raise CheckpointingException(_msg) [rank15]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank15]: Traceback (most recent call last): (RANK 17) [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank15]: local_data = map_fun() [rank15]: ^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank15]: result = func(*args, **kwargs) [rank15]: ^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank21]: raise CheckpointingException(_msg) [rank21]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank21]: Traceback (most recent call last): (RANK 12) [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank21]: local_data = map_fun() [rank21]: ^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank21]: result = func(*args, **kwargs) [rank21]: ^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank26]: local_plan = planner.create_local_plan() [rank26]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank26]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank26]: raise CheckpointingException(_msg) [rank26]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key 
embedding.position_embeddings.weight [rank26]: Traceback (most recent call last): (RANK 12) [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank15]: local_plan = planner.create_local_plan() [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank15]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank15]: raise CheckpointingException(_msg) [rank15]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank15]: Traceback (most recent call last): (RANK 18) [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank21]: local_plan = planner.create_local_plan() [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank21]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank21]: raise CheckpointingException(_msg) [rank21]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank21]: Traceback (most recent call last): (RANK 13) [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank7]: raise CheckpointingException(_msg) [rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank7]: Traceback (most recent call last): (RANK 13) [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank7]: local_data = map_fun() [rank7]: ^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank7]: result = func(*args, **kwargs) [rank7]: ^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank26]: local_data = map_fun() [rank26]: ^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank26]: result = func(*args, **kwargs) [rank26]: ^^^^^^^^^^^^^^^^^^^^^ [rank26]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank26]: local_plan = planner.create_local_plan() [rank26]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank26]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank15]: local_data = map_fun() [rank15]: ^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank15]: result = func(*args, **kwargs) [rank15]: ^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank15]: local_plan = planner.create_local_plan() [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank15]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank21]: local_data = map_fun() [rank21]: ^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank21]: result = func(*args, **kwargs) [rank21]: ^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank21]: local_plan = planner.create_local_plan() [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointin/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank19]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank7]: local_plan = planner.create_local_plan() [rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_c2/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank1]: local_data = map_fun() [rank1]: ^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank1]: result = func(*args, **kwargs) [rank1]: ^^^^^^^^^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank26]: raise CheckpointingException(_msg) [rank26]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank26]: Traceback (most recent call last): (RANK 13) [rank26]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank26]: local_data = map_fun() [rank26]: ^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank26]: result = func(*args, **kwargs) [rank26]: ^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank15]: raise CheckpointingException(_msg) [rank15]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096 tensor for key embedding.position_embeddings.weight [rank11]: Traceback (most recent call last): (RANK 11) [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: local_data = map_fun() [rank11]: ^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank11]: result = func(*args, **kwargs) [rank11]: ^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank19]: raise CheckpointingException(_msg) [rank19]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank19]: Traceback (most recent call last): (RANK 22) [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank19]: local_data = map_fun() [rank19]: ^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank19]: result = func(*args, **kwargs) [rank19]: ^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank1]: local_plan = planner.create_local_plan() [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank1]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank1]: raise CheckpointingException(_msg) [rank1]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank1]: Traceback (most recent call last): (RANK 12) [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank26]: local_plan = planner.create_local_plan() [rank26]: 
^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank28]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank28]: raise CheckpointingException(_msg) [rank28]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank19]: local_plan = planner.create_local_plan() [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank19]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank19]: raise CheckpointingException(_msg) [rank19]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank1]: local_data = map_fun() [rank1]: ^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank1]: result = func(*args, **kwargs) [rank1]: ^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank1]: local_plan = planner.create_local_plan() [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank28]: Traceback (most recent call last): (RANK 14) [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank28]: local_data = map_fun() [rank28]: ^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank28]: result = func(*args, **kwargs) [rank28]: ^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank28]: local_plan = planner.create_local_plan() [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank11]: local_plan = planner.create_local_plan() [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank11]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank11]: raise CheckpointingException(_msg) [rank11]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank11]: Traceback (most recent call last): (RANK 12) [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank19]: Traceback (most recent call last): (RANK 23) [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank19]: local_data = map_fun() [rank19]: ^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank19]: result = func(*args, **kwargs) [rank19]: ^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank19]: local_plan = planner.create_local_plan() [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank1]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank28]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: local_data = map_fun() [rank11]: ^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank11]: result = func(*args, **kwargs) [rank11]: ^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank11]: local_plan = planner.create_local_plan() [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank11]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank19]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank19]: raise CheckpointingException(_msg) [rank19]: 
megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank19]: Traceback (most recent call last): (RANK 24) [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank19]: local_data = map_fun() [rank19]: ^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site tensor for key embedding.position_embeddings.weight [rank1]: raise CheckpointingException(_msg) [rank1]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank1]: Traceback (most recent call last): (RANK 13) [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank1]: local_data = map_fun() [rank1]: ^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank1]: result = func(*args, **kwargs) [rank1]: ^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank28]: raise CheckpointingException(_msg) [rank28]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank28]: Traceback (most recent call last): (RANK 15) [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank28]: local_data = map_fun() [rank28]: ^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank28]: result = func(*args, **kwargs) [rank28]: ^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank11]: raise CheckpointingException(_msg) [rank11]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank11]: Traceback (most recent call last): (RANK 13) [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: local_data = map_fun() [rank11]: ^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank11]: result = func(*args, **kwargs) [rank11]: ^^^^^^^^^^^^^^^^^^^^^ [rank22]: Traceback (most recent call last): (RANK 11) [rank22]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank22]: local_data = map_fun() [rank22]: ^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank22]: result = func(*args, **kwargs) [rank22]: ^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank22]: local_plan = planner.create_local_plan() [rank22]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank1]: local_plan = planner.create_local_plan() [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank1]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_cunda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank3]: local_plan = planner.create_local_plan() [rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank3]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank28]: local_plan = planner.create_local_plan() [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank28]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank28]: raise CheckpointingException(_msg) [rank28]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank11]: local_plan = planner.create_local_plan() [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointier.py", line 223, in local_step [rank9]: local_plan = planner.create_local_plan() [rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank9]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, 
in _validate_global_shapes [rank9]: raise CheckpointingException(_msg) [rank22]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank3]: raise CheckpointingException(_msg) [rank3]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank3]: Traceback (most recent call last): (RANK 9) [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank3]: local_data = map_fun() [rank3]: ^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank3]: result = func(*args, **kwargs) [rank3]: ^^^^^^^^^^^^^^^^^^^^^ [rank28]: Traceback (most recent call last): (RANK 16) [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank28]: local_data = map_fun() [rank28]: ^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checng/strategies/torch.py", line 605, in create_local_plan [rank30]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank30]: raise CheckpointingException(_msg) [rank30]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank9]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank9]: Traceback (most recent call last): (RANK 17) [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank9]: local_data = map_fun() [rank9]: ^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank9]: result = func(*args, **kwargs) [rank9]: ^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank9]: local_plan = planner.create_local_plan() [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank22]: raise CheckpointingException(_msg) [rank22]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank22]: Traceback (most recent call last): (RANK 12) [rank22]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank22]: local_data = map_fun() [rank22]: ^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank22]: result = func(*args, **kwargs) [rank22]: ^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank3]: local_plan = planner.create_local_plan() [rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank3]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank3]: raise CheckpointingException(_msg) [rank3]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank3]: Traceback (most recent call last): (RANK 10) [rank30]: Traceback (most recent call last): (RANK 14) [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank30]: local_data = map_fun() [rank30]: ^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank30]: result = func(*args, **kwargs) [rank30]: ^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank30]: local_plan = planner.create_local_plan() [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank9]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank9]: raise CheckpointingException(_msg) [rank9]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank9]: Traceback (most recent call last): (RANK 18) [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank9]: local_data = map_fun() [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank22]: local_plan = 
planner.create_local_plan() [rank22]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank22]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank22]: raise CheckpointingException(_msg) [rank22]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank3]: local_data = map_fun() [rank3]: ^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank3]: result = func(*args, **kwargs) [rank3]: ^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank3]: local_plan = planner.create_local_plan() [rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank30]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank9]: ^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank9]: result = func(*args, **kwargs) [rank9]: ^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank9]: local_plan = planner.create_local_plan() [rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank9]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank22]: Traceback (most recent call last): (RANK 13) [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank22]: local_data = map_fun() [rank22]: ^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank22]: result = func(*args, **kwargs) [rank22]: ^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank22]: local_plan = planner.create_local_plan() [rank22]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank22]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank3]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank3]: raise CheckpointingException(_msg) [rank3]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank3]: Traceback (most recent call last): (RANK 11) [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.1ank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank30]: raise CheckpointingException(_msg) [rank30]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank30]: Traceback (most recent call last): (RANK 15) [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank30]: local_data = map_fun() [rank30]: ^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank30]: result = func(*args, **kwargs) [rank30]: ^^^^^^^^^^^^^^^^^^^^^ [rank9]: raise CheckpointingException(_msg) [rank9]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank9]: Traceback (most recent call last): (RANK 19) [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [ranng/strategies/torch.py", line 605, in create_local_plan [rank10]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank10]: raise CheckpointingException(_msg) [rank21]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank21]: raise CheckpointingException(_msg) [rank21]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank21]: Traceback (most recent call last): (RANK 14) [rank21]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank21]: local_data = map_fun() [rank21]: ^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank4]: raise CheckpointingException(_msg) [rank4]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank4]: Traceback (most recent call last): (RANK 25) [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank4]: local_data = map_fun() [rank4]: ^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank4]: result = func(*args, **kwargs) [rank4]: ^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank30]: local_plan = planner.create_local_plan() [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank30]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank30]: raise CheckpointingException(_msg) [rank30]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank10]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank21]: result = func(*args, **kwargs) [rank21]: ^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank4]: local_plan = planner.create_local_plan() [rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank4]: raise CheckpointingException(_msg) [rank4]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank4]: Traceback (most recent call last): (RANK 26) [rank30]: Traceback (most recent 
call last): (RANK 16) [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank30]: local_data = map_fun() [rank30]: ^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checng/strategies/torch.py", line 605, in create_local_plan [rank25]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank25]: raise CheckpointingException(_msg) [rank25]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank10]: Traceback (most recent call last): (RANK 14) [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank10]: local_data = map_fun() [rank10]: ^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank10]: result = func(*args, **kwargs) [rank10]: ^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank10]: local_plan = planner.create_local_plan() [rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank21]: local_plan = planner.create_local_plan() [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank21]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank21]: raise CheckpointingException(_msg) [rank21]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank4]: local_data = map_fun() [rank4]: ^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank4]: result = func(*args, **kwargs) [rank4]: ^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank4]: local_plan = planner.create_local_plan() [rank4]: 
^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank25]: Traceback (most recent call last): (RANK 14) [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank25]: local_data = map_fun() [rank25]: ^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank25]: result = func(*args, **kwargs) [rank25]: ^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank25]: local_plan = planner.create_local_plan() [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank10]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank10]: raise CheckpointingException(_msg) [rank10]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank10]: Traceback (most recent call last): (RANK 15) [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank10]: local_data = map_fun() [rank10]: ^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank21]: Traceback (most recent call last): (RANK 15) [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank21]: local_data = map_fun() [rank21]: ^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank21]: result = func(*args, **kwargs) [rank21]: ^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank21]: local_plan = planner.create_local_plan() [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank4]: raise CheckpointingException(_msg) [rank4]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight 
[rank4]: Traceback (most recent call last): (RANK 27) [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank4]: local_data = map_fun() [rank4]: ^^^^^^^^^ [rank4]: File "/mnheckpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank0]: raise CheckpointingException(_msg) [rank25]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank10]: result = func(*args, **kwargs) [rank10]: ^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank10]: local_plan = planner.create_local_plan() [rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank10]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank10]: raise CheckpointingException(_msg) [rank21]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank21]: raise CheckpointingException(_msg) [rank21]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank21]: Traceback (most recent call last): (RANK 16) [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank21]: local_data = map_fun() [rank21]: ^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checwrapper [rank16]: result = func(*args, **kwargs) [rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank0]: Traceback (most recent call last): (RANK 14) [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank0]: local_data = map_fun() [rank0]: ^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank0]: result = func(*args, **kwargs) [rank0]: ^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank0]: local_plan = planner.create_local_plan() [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank25]: raise CheckpointingException(_msg) [rank25]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded 
(torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank25]: Traceback (most recent call last): (RANK 15) [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank25]: local_data = map_fun() [rank25]: ^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank25]: result = func(*args, **kwargs) [rank25]: ^^^^^^^^^^^^^^^^^^^^^ [rank10]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank10]: Traceback (most recent call last): (RANK 16) [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank10]: local_data = map_fun() [rank10]: ^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/chec])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank15]: Traceback (most recent call last): (RANK 19) [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank15]: local_data = map_fun() [rank16]: ^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank16]: local_plan = planner.create_local_plan() [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank16]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank16]: raise CheckpointingException(_msg) [rank16]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank0]: raise CheckpointingException(_msg) [rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank0]: Traceback (most recent call last): (RANK 15) [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank0]: local_data = map_fun() [rank25]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank25]: local_plan = planner.create_local_plan() [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank25]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank25]: raise CheckpointingException(_msg) [rank25]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank15]: ^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank16]: Traceback (most recent call last): (RANK 9) [rank0]: ^^^^^^^^^ [rank25]: Traceback (most recent call last): (RANK 16) [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank25]: local_data = map_fun() [rank25]: ^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checr loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank29]: Traceback (most recent call last): (RANK 27) [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank29]: local_data = map_fun() [rank29]: ^^^^^^^^^ [rank15]: result = func(*args, **kwargs) [rank15]: ^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank15]: local_plan = planner.create_local_plan() [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank15]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank15]: raise CheckpointingException(_msg) [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank16]: local_data = map_fun() [rank16]: ^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank16]: result = func(*args, **kwargs) [rank16]: ^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank16]: local_plan = planner.create_local_plan() [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank0]: result = func(*args, **kwargs) [rank0]: ^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank0]: local_plan = planner.create_local_plan() [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank0]: raise CheckpointingException(_msg) [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank29]: result = func(*args, **kwargs) [rank29]: ^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank29]: local_plan = planner.create_local_plan() [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank29]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: raise CheckpointingException(_msg) [rank15]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank15]: Traceback (most recent call last): (RANK 20) [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank15]: local_data = map_fun() [rank15]: ^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank15]: result = func(*args, **kwargs) [rank15]: ^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank15]: local_plan = planner.create_local_plan() [rank16]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank16]: raise CheckpointingException(_msg) [rank16]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank16]: 
Traceback (most recent call last): (RANK 10) [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank16]: local_data = map_fun() [rank16]: ^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank0]: Traceback (most recent call last): (RANK 16) [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank0]: local_data = map_fun() [rank0]: ^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank0]: result = func(*args, **kwargs) [rank0]: ^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loadheckpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank15]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank15]: raise CheckpointingException(_msg) [rank15]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank15]: Traceback (most recent call last): (RANK 21) [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank15]: local_data = map_fun() [rank16]: result = func(*args, **kwargs) [rank16]: ^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank16]: local_plan = planner.create_local_plan() [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank16]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank16]: raise CheckpointingException(_msg) [rank5]: raise CheckpointingException(_msg) [rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and 
expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank5]: Traceback (most recent call last): (RANK 14) [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank5]: local_data = map_fun() [rank5]: ^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank5]: result = func(*args, **kwargs) [rank5]: ^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank29]: Traceback (most recent call last): (RANK 28) [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank29]: local_data = map_fun() [rank29]: ^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank29]: result = func(*args, **kwargs) [rank29]: ^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank29]: local_plan = planner.create_local_plan() [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank15]: ^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank15]: result = func(*args, **kwargs) [rank15]: ^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank15]: local_plan = planner.create_local_plan() [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatrong/strategies/torch.py", line 605, in create_local_plan [rank11]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank16]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096))ng/strategies/torch.py", line 605, in create_local_plan [rank22]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank22]: raise CheckpointingException(_msg) [rank22]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank22]: Traceback (most recent call last): (RANK 14) [rank22]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank22]: local_data = map_fun() [rank5]: local_plan = planner.create_local_plan() [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank5]: raise CheckpointingException(_msg) [rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank5]: Traceback (most recent call last): (RANK 15) [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank29]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: raise CheckpointingException(_msg) [rank29]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank29]: Traceback (most recent call last): (RANK 29) [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank29]: local_data = map_fun() [rank29]: ^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank11]: raise CheckpointingException(_msg) [rank22]: ^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank22]: result = func(*args, **kwargs) [rank5]: local_data = map_fun() [rank5]: ^^^^^^^^^ [rank29]: result = func(*args, **kwargs) [rank29]: ^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank29]: local_plan = planner.create_local_plan() [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hakpoint/logger.py", line 87, in wrapper [rank30]: result = func(*args, **kwargs) [rank30]: ^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank30]: local_plan = planner.create_local_plan() [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank11]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key 
embedding.position_embeddings.weight [rank11]: Traceback (most recent call last): (RANK 14) [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: local_data = map_fun() [rank11]: ^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank11]: result = func(*args, **kwargs) [rank11]: ^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank11]: local_plan = planner.create_local_plan() [rank22]: ^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank22]: local_plan = planner.create_local_plan() [rank22]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank22]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank22]: raise CheckpointingException(_msg) [rank22]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank5]: result = func(*args, **kwargs) [rank5]: ^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank5]: local_plan = planner.create_local_plan() [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank5]: raise CheckpointingException(_msg) [rank30]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank30]: raise CheckpointingException(_msg) [rank30]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank30]: Traceback (most recent call last): (RANK 17) [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank30]: local_data = map_fun() [rank30]: ^^^^^^^^^ [rank30]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank11]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank11]: raise CheckpointingException(_msg) [rank11]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank11]: Traceback (most recent call last): (RANK 15) [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: local_data = map_fun() [rank22]: Traceback (most recent call last): (RANK 15) [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank22]: local_data = map_fun() [rank22]: ^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank22]: result = func(*args, **kwargs) [rank22]: ^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank22]: local_plan = planner.create_local_plan() [rank22]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank5]: Traceback (most recent call last): (RANK 16) [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank5]: local_data = map_fun() [rank5]: ^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank5]: result = func(*args, **kwargs) [rank5]: ^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank2]: local_plan = planner.create_local_plan() [rank30]: result = func(*args, **kwargs) [rank11]: ^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank11]: result = func(*args, **kwargs) [rank11]: ^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank11]: local_plan = 
planner.create_local_plan() [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank11]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank22]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank22]: raise CheckpointingException(_msg) [rank22]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank22]: Traceback (most recent call last): (RANK 16) [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank22]: local_data = map_fun() [rank22]: ^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank2]: raise CheckpointingException(_msg) [rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank2]: Traceback (most recent call last): (RANK 17) [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank2]: local_data = map_fun() [rank30]: ^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank30]: local_plan = planner.create_local_plan() [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank30]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank30]: raise CheckpointingException(_msg) [rank30]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank11]: raise CheckpointingException(_msg) [rank11]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected 
[rank2]: Traceback (most recent call last):
[rank2]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter
[rank2]:     local_data = map_fun()
[rank2]:                  ^^^^^^^^^
[rank2]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper
[rank2]:     result = func(*args, **kwargs)
[rank2]:              ^^^^^^^^^^^^^^^^^^^^^
[rank2]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step
[rank2]:     local_plan = planner.create_local_plan()
[rank2]:                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^
[rank2]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan
[rank2]:     self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors)
[rank2]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes
[rank2]:     raise CheckpointingException(_msg)
[rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight
[The same CheckpointingException and traceback are raised, interleaved, by ranks 1-4, 7-13, 18-19, 21, 23-26, 28, and 30; only the "(RANK N)" index reported by reduce_scatter varies between the repeated copies.]
wrapper [rank4]: result = func(*args, **kwargs) [rank4]: ^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank4]: local_plan = planner.create_local_plan() [rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank4]: raise CheckpointingException(_msg) [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank25]: result = func(*args, **kwargs) [rank25]: ^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank25]: local_plan = planner.create_local_plan() [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank25]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank25]: raise CheckpointingException(_msg) [rank8]: result = func(*args, **kwargs) [rank8]: ^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank8]: local_plan = planner.create_local_plan() [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank8]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank8]: rain/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank15]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank18]: local_plan = planner.create_local_plan() [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatro])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank23]: Traceback (most recent call last): (RANK 19) [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank23]: local_data = map_fun() [rank23]: ^^^^^^^^^ [rank23]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank23]: result = func(*args, **kwargs) [rank23]: ^^^^^^^^^^^^^^^^^^^^^ [rank4]: megatron.core.dist_checkpointing.core.Checkpoiner.py", line 223, in local_step [rank0]: local_plan = planner.create_local_plan() [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank0]: raise CheckpointingException(_msg) [rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank0]: Traceback (most recent call last): (RANK 17) [rank25]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank25]: Traceback (most recent call last): (RANK 18) [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank25]: local_data = map_fun() [rank25]: ^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank25]: result = func(*args, **kwargs) [rank25]: ^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank25]: local_plan = planner.create_local_plan() [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank15]: raise CheckpointingException(_msg) [rank15]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank15]: Traceback (most recent call last): (RANK 22) [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank15]: local_data = map_fun() [rank15]: ^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank15]: result = func(*args, **kwargs) [rank15]: ^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank0]: local_data = map_fun() [rank0]: ^^^^^^^^^ [rank0]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank0]: result = func(*args, **kwargs) [rank0]: ^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank0]: local_plan = planner.create_local_plan() [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank25]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank25]: raise CheckpointingException(_msg) [rank25]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096o.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank29]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank15]: local_plan = planner.create_local_plan() [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank15]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank15]: raise CheckpointingException(_msg) [rank15]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank23]: local_plan = planner.create_local_plan() [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank23]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank23]: raise CheckpointingException(_msg) [rank23]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank23]: Traceback (most recent call last): (RANK 20) [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank29]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: raise CheckpointingException(_msg) [rank29]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank29]: Traceback (most recent call last): (RANK 30) [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank29]: local_data = map_fun() [rank29]: ^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank29]: result = func(*args, **kwargs) [rank29]: ^^^^^^^^^^^^^^^^^^^^^ [rank15]: Traceback (most recent call last): (RANK 23) [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank15]: local_data = map_fun() [rank15]: ^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank15]: result = func(*args, **kwargs) [rank15]: ^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank15]: local_plan = planner.create_local_plan() [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank23]: local_data = map_fun() [rank23]: ^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank23]: result = func(*args, **kwargs) [rank23]: ^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank23]: local_plan = planner.create_local_plan() [rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank23]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank15]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank15]: raise CheckpointingException(_msg) [rank15]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank15]: Traceback (most recent call last): (RANK 24) [rank15]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank15]: local_data = map_fun() [rank15]: ^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/sitekpoint/logger.py", line 87, in wrapper [rank11]: result = func(*args, **kwargs) [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank23]: raise CheckpointingException(_msg) [rank23]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank23]: Traceback (most recent call last): (RANK 21) [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank23]: local_data = map_fun() [rank23]: ^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank23]: result = func(*args, **kwargs) [rank23]: ^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank0]: raise CheckpointingException(_msg) [rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank0]: Traceback (most recent call last): (RANK 18) [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank0]: local_data = map_fun() [rank0]: ^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank0]: result = func(*args, **kwargs) [rank0]: ^^^^^^^^^^^^^^^^^^^^^ [rank29]: local_plan = planner.create_local_plan() [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank29]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: raise CheckpointingException(_msg) [rank29]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank29]: Traceback (most recent call last): (RANK 31) [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: ^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank23]: local_plan = planner.create_local_plan() [rank23]: 
^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatror loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank19]: Traceback (most recent call last): (RANK 27) [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank19]: local_data = map_fun() [rank19]: ^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank19]: result = func(*args, **kwargs) [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank0]: local_plan = planner.create_local_plan() [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank0]: raise CheckpointingException(_msg) [rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank0]: Traceback (most recent call last): (RANK 19) [rank29]: local_data = map_fun() [rank29]: ^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank29]: result = func(*args, **kwargs) [rank29]: ^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank29]: local_plan = planner.create_local_plan() [rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank29]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank11]: local_plan = planner.create_local_plan() [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank11]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank11]: raise CheckpointingException(_msg) [rank11]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank19]: ^^^^^^^^^^^^^^^^^^^^^ [rank0]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [raner.py", line 223, in local_step [rank5]: local_plan = planner.create_local_plan() [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank5]: raise CheckpointingException(_msg) [rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank29]: raise CheckpointingException(_msg) [rank29]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight n/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank24]: raise CheckpointingException(_msg) [rank11]: Traceback (most recent call last): (RANK 17) [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: local_data = map_fun() [rank11]: ^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank11]: result = func(*args, **kwargs) [rank11]: ^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank11]: local_plan = planner.create_local_plan() [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank19]: local_plan = planner.create_local_plan() [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank19]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank19]: raise CheckpointingException(_msg) [rank19]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) 
and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank5]: Traceback (most recent call last): (RANK 17) [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank5]: local_data = map_fun() [rank5]: ^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank5]: result = func(*args, **kwargs) [rank5]: ^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank5]: local_plan = planner.create_local_plan() [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank24]: Traceback (most recent call last): (RANK 22) [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank24]: local_data = map_fun() [rank24]: ^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: result = func(*args, **kwargs) [rank24]: ^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank24]: local_plan = planner.create_local_plan() [rank11]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank11]: raise CheckpointingException(_msg) [rank11]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank11]: Traceback (most recent call last): (RANK 18) [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: local_data = map_fun() [rank11]: ^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank19]: Traceback (most recent call last): (RANK 28) [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank19]: local_data = map_fun() [rank19]: ^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank19]: result = func(*args, **kwargs) [rank19]: ^^^^^^^^^^^^^^^^^^^^^ [rank19]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank19]: local_plan = planner.create_local_plan() [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: result = func(*args, **kwargs) [rank11]: ^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank11]: local_plan = planner.create_local_plan() [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank11]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank11]: raise CheckpointingException(_msg) [rank19]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank19]: raise CheckpointingException(_msg) [rank19]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank19]: Traceback (most recent call last): (RANK 29) [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank19]: local_data = map_fun() [rank19]: ^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank5]: raise CheckpointingException(_msg) [rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank5]: Traceback (most recent call last): (RANK 18) [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank5]: local_data = map_fun() [rank5]: ^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank5]: result = func(*args, **kwargs) [rank5]: ^^^^^^^^^^^^^^^^^^^^^ [rank24]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank24]: raise CheckpointingException(_msg) [rank24]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank24]: Traceback (most recent call last): (RANK 23) [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank24]: local_data = map_fun() [rank24]: ^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: result = func(*args, **kwargs) [rank24]: ^^^^^^^^^^^^^^^^^^^^^ [rank11]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank19]: result = func(*args, **kwargs) [rank19]: ^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank19]: local_plan = planner.create_local_plan() [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hakpoint/logger.py", line 87, in wrapper [rank22]: result = func(*args, **kwargs) [rank22]: ^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank22]: local_plan = planner.create_local_plan() [rank22]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank5]: local_plan = planner.create_local_plan() [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank5]: raise CheckpointingException(_msg) [rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank5]: Traceback (most recent call last): (RANK 19) [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank24]: local_plan = planner.create_local_plan() [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: self._validate_global_shapes(self.metadata, 
self.shapes_validation_sharded_tensors) [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank24]: raise CheckpointingException(_msg) [rank24]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank10]: Traceback (most recent call last): (RANK 19) [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank10]: local_data = map_fun() [rank10]: ^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank10]: result = func(*args, **kwargs) [rank10]: ^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank10]: local_plan = planner.create_local_plan() [rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank22]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank2]: local_data = map_fun() [rank2]: ^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank2]: result = func(*args, **kwargs) [rank2]: ^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank2]: local_plan = planner.create_local_plan() [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: Traceback (most recent call last): (RANK 24) [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank24]: local_data = map_fun() [rank24]: ^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank30]: Traceback (most recent call last): (RANK 19) [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank30]: local_data = map_fun() [rank30]: ^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank30]: result = func(*args, **kwargs) [rank10]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank10]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank10]: raise CheckpointingException(_msg) [rank10]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank10]: Traceback (most recent call last): (RANK 20) [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank10]: local_data = map_fun() [rank10]: ^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank22]: raise CheckpointingException(_msg) [rank22]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank22]: Traceback (most recent call last): (RANK 17) [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank22]: local_data = map_fun() [rank22]: ^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank22]: result = func(*args, **kwargs) [rank22]: ^^^^^^^^^^^^^^^^^^^^^ [rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank2]: raise CheckpointingException(_msg) [rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank2]: Traceback (most recent call last): (RANK 20) [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank2]: local_data = map_fun() [rank2]: ^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank30]: ^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank30]: local_plan = planner.create_local_plan() [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank30]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank30]: raise CheckpointingException(_msg) [rank30]: 
megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank10]: result = func(*args, **kwargs) [rank10]: ^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank10]: local_plan = planner.create_local_plan() [rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank10]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank10]: raise CheckpointingException(_msg) [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank22]: local_plan = planner.create_local_plan() [rank22]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank22]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank22]: raise CheckpointingException(_msg) [rank22]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank2]: result = func(*args, **kwargs) [rank2]: ^^^^^^^^^^^^^^^^^^^^^ [rank30]: Traceback (most recent call last): (RANK 20) [rank10]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank10]: Traceback (most recent call last): (RANK 21) [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank10]: local_data = map_fun() [rank10]: ^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank10]: result = func(*args, **kwargs) [rank10]: ^^^^^^^^^^^^^^^^^^^^^ [rank10]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank10]: local_plan = planner.create_local_plan() [rank22]: Traceback (most recent call last): (RANK 18) [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank22]: local_data = map_fun() [rank22]: ^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank22]: result = func(*args, **kwargs) [rank22]: 
All ranks in the job (rank0 through rank30 appear in the log) report the same failure while loading the distributed checkpoint; in addition, each rank echoes the tracebacks collected from its peers during reduce_scatter, annotated "(RANK n)". The tracebacks are identical apart from the rank number; a single representative copy follows.
[rank0]: Traceback (most recent call last):
[rank0]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter
[rank0]:     local_data = map_fun()
[rank0]:                  ^^^^^^^^^
[rank0]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper
[rank0]:     result = func(*args, **kwargs)
[rank0]:              ^^^^^^^^^^^^^^^^^^^^^
[rank0]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step
[rank0]:     local_plan = planner.create_local_plan()
[rank0]:                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^
[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan
[rank0]:     self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors)
[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes
[rank0]:     raise CheckpointingException(_msg)
[rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank8]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank8]: raise CheckpointingException(_msg) [rank8]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank8]: Traceback (most recent call last): (RANK 26) [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank8]: local_data = map_fun() [rank19]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank19]: Traceback (most recent call last): (RANK 30) [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank19]: local_data = map_fun() [rank19]: ^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank19]: result = func(*args, **kwargs) [rank19]: ^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank19]: local_plan = planner.create_local_plan() [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank5]: result = func(*args, **kwargs) [rank5]: ^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank5]: local_plan = planner.create_local_plan() [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank5]: raise CheckpointingException(_msg) [rank24]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank24]: Traceback (most recent call last): (RANK 28) [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank24]: local_data = map_fun() [rank24]: ^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: result = func(*args, **kwargs) [rank24]: 
^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank24]: local_plan = planner.create_local_plan() [rank8]: ^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank8]: result = func(*args, **kwargs) [rank8]: ^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank8]: local_plan = planner.create_local_plan() [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank8]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank19]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank19]: raise CheckpointingException(_msg) [rank19]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank19]: Traceback (most recent call last): (RANK 31) [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank19]: local_data = map_fun() [rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank5]: Traceback (most recent call last): (RANK 20) [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank5]: local_data = map_fun() [rank5]: ^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank5]: result = func(*args, **kwargs) [rank5]: ^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank5]: local_plan = planner.create_local_plan() [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in 
_validate_global_shapes [rank24]: raise CheckpointingException(_msg) [rank24]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank24]: Traceback (most recent call last): (RANK 29) [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank24]: local_data = map_fun() [rank8]: raise CheckpointingException(_msg) [rank8]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank19]: ^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank19]: result = func(*args, **kwargs) [rank19]: ^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank19]: local_plan = planner.create_local_plan() [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank19]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: ^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: result = func(*args, **kwargs) [rank24]: ^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank24]: local_plan = planner.create_local_plan() [rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank24]: File "/mnt/weka/home/han/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank25]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank8]: Traceback (most recent call last): (RANK 27) [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank8]: local_data = map_fun() [rank8]: ^^^^^^^^^ [rank8]: File "/mnse CheckpointingException(_msg) [rank9]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank9]: Traceback (most recent call last): (RANK 22) [rank9]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank9]: local_data = map_fun() [rank9]: ^^^^^^^^^ [rank19]: raise CheckpointingException(_msg) [rank19]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight ng/strategies/torch.py", line 605, in create_local_plan [rank16]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank16]: raise CheckpointingException(_msg) [rank16]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank16]: Traceback (most recent call last): (RANK 14) [rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank5]: raise CheckpointingException(_msg) [rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank5]: Traceback (most recent call last): (RANK 21) [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank5]: local_data = map_fun() [rank5]: ^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank25]: raise CheckpointingException(_msg) [rank25]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank25]: Traceback (most recent call last): (RANK 22) [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank25]: local_data = map_fun() [rank25]: ^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank25]: result = func(*args, **kwargs) [rank25]: ^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank9]: result = func(*args, **kwargs) [rank9]: ^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank9]: local_plan = planner.create_local_plan() [rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank9]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank9]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank9]: raise CheckpointingException(_msg) [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank16]: local_data = map_fun() [rank16]: ^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank16]: result = func(*args, **kwargs) [rank16]: ^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank16]: local_plan = planner.create_local_plan() [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank5]: result = func(*args, **kwargs) [rank5]: ^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank5]: local_plan = planner.create_local_plan() [rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank5]: raise CheckpointingException(_msg) [rank25]: local_plan = planner.create_local_plan() [rank9]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank9]: Traceback (most recent call last): (RANK 23) [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank9]: local_data = map_fun() [rank9]: ^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank9]: result = func(*args, **kwargs) [rank9]: ^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank9]: local_plan = planner.create_local_plan() [rank16]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank16]: raise CheckpointingException(_msg) [rank16]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 
4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank16]: Traceback (most recent call last): (RANK 15) [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank16]: local_data = map_fun() [rank16]: ^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank2]: Traceback (most recent call last): (RANK 22) [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank2]: local_data = map_fun() [rank2]: ^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank2]: result = func(*args, **kwargs) [rank2]: ^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank2]: local_plan = planner.create_local_plan() [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank25]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank25]: raise CheckpointingException(_msg) [rank25]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank25]: Traceback (most recent call last): (RANK 23) [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank25]: local_data = map_fun() [rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank9]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank9]: raise CheckpointingException(_msg) [rank9]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank9]: Traceback (most recent call last): (RANK 24) [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank9]: local_data = map_fun() [rank16]: result = func(*args, **kwargs) [rank16]: ^^^^^^^^^^^^^^^^^^^^^ [rank16]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank16]: local_plan = planner.create_local_plan() [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank16]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank16]: raise CheckpointingException(_msg) [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank2]: raise CheckpointingException(_msg) [rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank2]: Traceback (most recent call last): (RANK 23) [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank2]: local_data = map_fun() [rank25]: ^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank25]: result = func(*args, **kwargs) [rank25]: ^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank25]: local_plan = planner.create_local_plan() [rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank25]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank9]: ^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank16]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank16]: Traceback (most recent call last): (RANK 16) [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank16]: local_data = map_fun() [rank16]: ^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/chec])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank22]: Traceback (most recent call last): 
(RANK 19) [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank22]: local_data = map_fun() [rank2]: ^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank2]: result = func(*args, **kwargs) [rank25]: raise CheckpointingException(_msg) [rank25]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank25]: Traceback (most recent call last): (RANK 24) [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank25]: local_data = map_fun() [rank25]: ^^^^^^^^^ [rank25]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank28]: Traceback (most recent call last): (RANK 19) [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank9]: result = func(*args, **kwargs) [rank9]: ^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank9]: local_plan = planner.create_local_plan() [rn/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank11]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank11]: raise CheckpointingException(_msg) [rank11]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank11]: Traceback (most recent call last): (RANK 22) [rank22]: ^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank22]: result = func(*args, **kwargs) [rank2]: ^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank2]: local_plan = planner.create_local_plan() [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank2]: raise CheckpointingException(_msg) [rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight 
[rank28]: local_data = map_fun() [rank28]: ^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank28]: result = func(*args, **kwargs) [rank28]: ^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank28]: local_plan = planner.create_local_plan() [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank28]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: local_data = map_fun() [rank11]: ^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank11]: result = func(*args, **kwargs) [rank11]: ^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank11]: local_plan = planner.create_local_plan() [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank22]: ^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank22]: local_plan = planner.create_local_plan() [rank22]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank22]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank22]: raise CheckpointingException(_msg) [rank22]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank2]: Traceback (most recent call last): (RANK 24) [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank2]: local_data = map_fun() [rank2]: ^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank2]: result = func(*args, **kwargs) [rank2]: ^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank2]: local_plan = planner.create_local_plan() [rk7]: local_data = map_fun() [rank7]: ^^^^^^^^^ [rank28]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank28]: raise CheckpointingException(_msg) [rank11]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank11]: raise CheckpointingException(_msg) [rank11]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank11]: Traceback (most recent call last): (RANK 23) [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: local_data = map_fun() [rank11]: ^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank22]: Traceback (most recent call last): (RANK 20) [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank22]: local_data = map_fun() [rank22]: ^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank22]: result = func(*args, **kwargs) [rank22]: ^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank22]: local_plan = planner.create_local_plan() [rank22]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank7]: result = func(*args, **kwargs) [rank7]: ^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank7]: local_plan = planner.create_local_plan() [rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank7]: raise CheckpointingException(_msg) [rank28]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank28]: Traceback (most recent call last): (RANK 20) [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank28]: local_data = map_fun() [rank28]: 
^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank28]: result = func(*args, **kwargs) [rank28]: ^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank28]: local_plan = planner.create_local_plan() [rank11]: result = func(*args, **kwargs) [rank11]: ^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank11]: local_plan = planner.create_local_plan() [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank11]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank11]: raise CheckpointingException(_msg) [rank22]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank22]: raise CheckpointingException(_msg) [rank22]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank22]: Traceback (most recent call last): (RANK 21) [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank22]: local_data = map_fun() [rank22]: ^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank7]: Traceback (most recent call last): (RANK 20) [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank7]: local_data = map_fun() [rank7]: ^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank7]: result = func(*args, **kwargs) [rank7]: ^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank7]: local_plan = planner.create_local_plan() [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank28]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank28]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank28]: raise CheckpointingException(_msg) [rank28]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank28]: Traceback (most recent call last): (RANK 21) [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank28]: local_data = map_fun() [rank11]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank11]: Traceback (most recent call last): (RANK 24) [rank22]: result = func(*args, **kwargs) [rank22]: ^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank22]: local_plan = planner.create_local_plan() [rank22]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank18]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank18]: raise CheckpointingException(_msg) [rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: ^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank28]: result = func(*args, **kwargs) [rank28]: ^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank28]: local_plan = planner.create_local_plan() [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatroo.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank24]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: local_data = map_fun() [rank11]: ^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank12]: Traceback (most recent call last): (RANK 19) [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank12]: local_data = map_fun() [rank12]: ^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank12]: result = func(*args, **kwargs) [rank12]: ^^^^^^^^^^^^^^^^^^^^^ [rank18]: 
megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank18]: Traceback (most recent call last): (RANK 22) [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank18]: local_data = map_fun() [rank18]: ^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank18]: result = func(*args, **kwargs) [rank18]: ^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank18]: local_plan = planner.create_local_plan() [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank7]: raise CheckpointingException(_msg) [rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank7]: Traceback (most recent call last): (RANK 21) [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank7]: local_data = map_fun() [rank7]: ^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank24]: raise CheckpointingException(_msg) [rank24]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank24]: Traceback (most recent call last): (RANK 30) [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank24]: local_data = map_fun() [rank24]: ^^^^^^^^^ [rank24]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank24]: result = func(*args, **kwargs) [rank24]: ^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank12]: local_plan = planner.create_local_plan() [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank12]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank12]: raise 
CheckpointingException(_msg)
[rank12]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight
[rank12]: Traceback (most recent call last): (RANK 20)
[rank12]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter
[rank12]:     local_data = map_fun()
[rank12]:                  ^^^^^^^^^
[rank12]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper
[rank12]:     result = func(*args, **kwargs)
[rank12]:              ^^^^^^^^^^^^^^^^^^^^^
[rank12]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step
[rank12]:     local_plan = planner.create_local_plan()
[rank12]:                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^
[rank12]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan
[rank12]:     self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors)
[rank12]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes
[rank12]:     raise CheckpointingException(_msg)
[rank12]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight
[deduplicated: ranks 0-3, 5, 7-12, 17, 18, 21, 23-26, 28, 30, and 31 print this identical traceback, interleaved and repeated once per validation round, all reporting the same global shape mismatch for embedding.position_embeddings.weight]
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank17]: local_data = map_fun() [rank17]: ^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank2]: raise CheckpointingException(_msg) [rank26]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank26]: Traceback (most recent call last): (RANK 22) [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank26]: local_data = map_fun() [rank26]: ^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank26]: result = func(*args, **kwargs) [rank26]: ^^^^^^^^^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank26]: local_plan = planner.create_local_plan() [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank8]: local_data = map_fun() [rank8]: ^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank8]: result = func(*args, **kwargs) [rank8]: ^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank8]: local_plan = planner.create_local_plan() [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank17]: result = func(*args, **kwargs) [rank17]: ^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank17]: local_plan = planner.create_local_plan() [rank17]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank17]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank17]: raise CheckpointingException(_msg) [rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank2]: Traceback (most recent call last): (RANK 26) [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank2]: local_data = 
map_fun() [rank2]: ^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank2]: result = func(*args, **kwargs) [rank2]: ^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank2]: local_plan = planner.create_local_plan() [rank26]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank8]: raise CheckpointingException(_msg) [rank8]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank8]: Traceback (most recent call last): (RANK 31) [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank8]: local_data = map_fun() [rank8]: ^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank17]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank17]: Traceback (most recent call last): (RANK 23) [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank17]: local_data = map_fun() [rank17]: ^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank17]: result = func(*args, **kwargs) [rank17]: ^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank17]: local_plan = planner.create_local_plan() [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank2]: raise CheckpointingException(_msg) [rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank2]: Traceback (most recent call last): (RANK 27) [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank2]: local_data = map_fun() [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 
605, in create_local_plan [rank26]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank26]: raise CheckpointingException(_msg) [rank26]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank26]: Traceback (most recent call last): (RANK 23) [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank26]: local_data = map_fun() [rank26]: ^^^^^^^^^ [rank8]: result = func(*args, **kwargs) [rank8]: ^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank8]: local_plan = planner.create_local_plan() [rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank8]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank8]: raise CheckpointingException(_msg) [rank17]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank17]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank17]: raise CheckpointingException(_msg) [rank17]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank17]: Traceback (most recent call last): (RANK 24) [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank17]: local_data = map_fun() [rank2]: ^^^^^^^^^ [rank2]: File "/mnse CheckpointingException(_msg) [rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank7]: Traceback (most recent call last): (RANK 22) [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank7]: local_data = map_fun() [rank7]: ^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank7]: result = func(*args, **kwargs) [rank7]: ^^^^^^^^^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank26]: 
result = func(*args, **kwargs) [rank26]: ^^^^^^^^^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank26]: local_plan = planner.create_local_plan() [rank26]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank26]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank26]: raise CheckpointingException(_msg) [rank8]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight r loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank17]: ^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank7]: local_plan = planner.create_local_plan() [rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank7]: raise CheckpointingException(_msg) [rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank7]: Traceback (most recent call last): (RANK 23) [rank26]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank26]: Traceback (most recent call last): (RANK 24) [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank26]: local_data = map_fun() [rank26]: ^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site tensor for key embedding.position_embeddings.weight [rank31]: Traceback (most recent call last): (RANK 11) [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank31]: local_data = map_fun() [rank31]: ^^^^^^^^^ [rank15]: Traceback (most recent call last): (RANK 27) [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank15]: local_data = map_fun() [rank15]: ^^^^^^^^^ [rank15]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank15]: result = func(*args, **kwargs) [rank15]: ^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank15]: local_plan = planner.create_local_plan() [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank18]: result = func(*args, **kwargs) [rank18]: ^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank18]: local_plan = planner.create_local_plan() [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank18]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank18]: raise CheckpointingException(_msg) [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank7]: local_data = map_fun() [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank31]: result = func(*args, **kwargs) [rank31]: ^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank31]: local_plan = planner.create_local_plan() [rank31]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank31]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank31]: raise CheckpointingException(_msg) [rank15]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank15]: raise CheckpointingException(_msg) [rank15]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank15]: Traceback (most recent call last): (RANK 28) [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank15]: local_data = map_fun() [rank15]: ^^^^^^^^^ [rank15]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank18]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank18]: Traceback (most recent call last): (RANK 25) [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank18]: local_data = map_fun() [rank18]: ^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank18]: result = func(*args, **kwargs) [rank18]: ^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank18]: local_plan = planner.create_local_plan() [rank7]: ^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank7]: result = func(*args, **kwargs) [rank7]: ^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank7]: local_plan = planner.create_local_plan() [rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank31]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank31]: Traceback (most recent call last): (RANK 12) [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank31]: local_data = map_fun() [rank31]: ^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank31]: result = func(*args, **kwargs) [rank31]: ^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank31]: local_plan = planner.create_local_plan() [rank15]: result = func(*args, **kwargs) [rank15]: ^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank15]: local_plan = planner.create_local_plan() [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank15]: 
self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank15]: raise CheckpointingException(_msg) [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank18]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank18]: raise CheckpointingException(_msg) [rank18]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank18]: Traceback (most recent call last): (RANK 26) [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank18]: local_data = map_fun() [rank7]: raise CheckpointingException(_msg) [rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank7]: Traceback (most recent call last): (RANK 24) [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank7]: local_data = map_fun() [rank7]: ^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank7]: result = func(*args, **kwargs) [rank7]: ^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank31]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank31]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank31]: raise CheckpointingException(_msg) [rank31]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank31]: Traceback (most recent call last): (RANK 13) [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank31]: local_data = map_fun() [rank15]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank15]: Traceback (most recent call last): (RANK 29) [rank15]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank15]: local_data = map_fun() [rank15]: ^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank15]: result = func(*args, **kwargs) [rank15]: ^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank15]: local_plan = planner.create_local_plan() [rank18]: ^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank18]: result = func(*args, **kwargs) [rank18]: ^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank18]: local_plan = planner.create_local_plan() [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank18]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank7]: local_plan = planner.create_local_plan() [rse CheckpointingException(_msg) [rank1]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank1]: Traceback (most recent call last): (RANK 22) [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank1]: local_data = map_fun() [rank1]: ^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank1]: result = func(*args, **kwargs) [rank1]: ^^^^^^^^^^^^^^^^^^^^^ [rank31]: ^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank31]: result = func(*args, **kwargs) [rank31]: ^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank31]: local_plan = planner.create_local_plan() [rank31]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointi-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank30]: result = func(*args, **kwargs) [rank30]: ^^^^^^^^^^^^^^^^^^^^^ [rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank15]: File "/mnt/weka/home/har loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank18]: raise CheckpointingException(_msg) [rank1]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank1]: local_plan = planner.create_local_plan() [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank1]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank1]: raise CheckpointingException(_msg) [rank1]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank1]: Traceback (most recent call last): (RANK 23) [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank30]: local_plan = planner.create_local_plan() [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank30]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank30]: raise CheckpointingException(_msg) [rank30]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank11]: Traceback (most recent call last): (RANK 27) [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: local_data = map_fun() [rank11]: ^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank11]: result = func(*args, **kwargs) [rank11]: ^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank11]: local_plan = planner.create_local_plan() [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank18]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch foo.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank21]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank21]: raise CheckpointingException(_msg) [rank21]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key 
embedding.position_embeddings.weight [rank21]: Traceback (most recent call last): (RANK 30) [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank21]: local_data = map_fun() [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank1]: local_data = map_fun() [rank30]: Traceback (most recent call last): (RANK 25) [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank11]: raise CheckpointingException(_msg) [rank11]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank11]: Traceback (most recent call last): (RANK 28) [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: local_data = map_fun() [rank11]: ^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank21]: ^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank21]: result = func(*args, **kwargs) [rank21]: ^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank21]: local_plan = planner.create_local_plan() [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank21]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank1]: ^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank1]: result = func(*args, **kwargs) [rank1]: ^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank1]: local_plan = planner.create_local_plan() [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank1]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in 
_validate_global_shapes [rank30]: local_data = map_fun() [rank30]: ^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank30]: result = func(*args, **kwargs) [rank30]: ^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank30]: local_plan = planner.create_local_plan() [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank30]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: result = func(*args, **kwargs) [rank11]: ^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank11]: local_plan = planner.create_local_plan() [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank11]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank11]: raise CheckpointingException(_msg) [rank21]: raise CheckpointingException(_msg) [rank21]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank21]: Traceback (most recent call last): (RANK 31) [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank21]: local_data = map_fun() [rank21]: ^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank21]: result = func(*args, **kwargs) [rank21]: ^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank1]: raise CheckpointingException(_msg) [rank1]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank1]: Traceback (most recent call last): (RANK 24) [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank1]: local_data = map_fun() [rank1]: ^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank1]: result = func(*args, **kwargs) [rank1]: ^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in 
local_step [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank30]: raise CheckpointingException(_msg) [rank30]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank30]: Traceback (most recent call last): (RANK 26) [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank30]: local_data = map_fun() [rank30]: ^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank30]: result = func(*args, **kwargs) [rank30]: ^^^^^^^^^^^^^^^^^^^^^ [rank11]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank11]: Traceback (most recent call last): (RANK 29) [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank11]: local_data = map_fun() [rank11]: ^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank11]: result = func(*args, **kwargs) [rank11]: ^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank11]: local_plan = planner.create_local_plan() [rank21]: local_plan = planner.create_local_plan() [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank21]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank21]: raise CheckpointingException(_msg) [rank21]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight -packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank17]: result = func(*args, **kwargs) [rank17]: ^^^^^^^^^^^^^^^^^^^^^ [rank1]: local_plan = planner.create_local_plan() [rk3]: local_data = map_fun() [rank3]: ^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank3]: result = func(*args, **kwargs) [rank3]: ^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank3]: local_plan = planner.create_local_plan() [rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", 
line 605, in create_local_plan [rank3]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank30]: local_plan = planner.create_local_plan() [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank30]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank30]: raise CheckpointingException(_msg) [rank30]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch fo-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank28]: result = func(*args, **kwargs) [rank28]: ^^^^^^^^^^^^^^^^^^^^^ [rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank11]: File "/mnt/weka/home/ha-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank10]: result = func(*args, **kwargs) [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank17]: local_plan = planner.create_local_plan() [rank17]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank17]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank17]: raise CheckpointingException(_msg) [rank17]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank3]: raise CheckpointingException(_msg) [rank3]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank3]: Traceback (most recent call last): (RANK 20) [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank3]: local_data = map_fun() [rank3]: ^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank3]: result = func(*args, **kwargs) [rank3]: ^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank28]: local_plan = planner.create_local_plan() [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in 
create_local_plan
[rank28]:     self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors)
[rank28]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes
[rank28]:     raise CheckpointingException(_msg)
[rank28]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight
[all ranks]: the same CheckpointingException and traceback are reported, interleaved, by the other worker ranks (rank 0 up to rank 31) and repeated once per collected entry from (RANK 14) through (RANK 31); one representative copy:
[rank0]: Traceback (most recent call last): (RANK 25)
[rank0]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter
[rank0]:     local_data = map_fun()
[rank0]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper
[rank0]:     result = func(*args, **kwargs)
[rank0]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step
[rank0]:     local_plan = planner.create_local_plan()
[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan
[rank0]:     self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors)
[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes
[rank0]:     raise CheckpointingException(_msg)
[rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank2]: result = func(*args, **kwargs) [rank2]: ^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank2]: local_plan = planner.create_local_plan() [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank31]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank31]: raise CheckpointingException(_msg) [rank31]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank31]: Traceback (most recent call last): (RANK 16) [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank31]: local_data = map_fun() [rank31]: ^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checn/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/siter loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank18]: Traceback (most recent call last): (RANK 27) [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank18]: local_data = map_fun() [rank18]: ^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank18]: result = func(*args, **kwargs) [rank18]: ^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank2]: raise CheckpointingException(_msg) [rank2]: megatron.core.dist_checkpointing.core.Checkpoinank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank7]: raise CheckpointingException(_msg) [rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and 
expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight
[checkpoint load: every participating rank — 0, 1, 3, 5, 7, 9, 11, 12, 13, 16, 17, 18, 22, 23, 25, 26, 28, 30, 31 — raised the identical CheckpointingException; the exception text additionally embeds the same traceback for sub-entries (RANK 17) through (RANK 31). Representative traceback:]
[rankN]: Traceback (most recent call last):
[rankN]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter
[rankN]:     local_data = map_fun()
[rankN]:                  ^^^^^^^^^
[rankN]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper
[rankN]:     result = func(*args, **kwargs)
[rankN]:              ^^^^^^^^^^^^^^^^^^^^^
[rankN]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step
[rankN]:     local_plan = planner.create_local_plan()
[rankN]:                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^
[rankN]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan
[rankN]:     self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors)
[rankN]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes
[rankN]:     raise CheckpointingException(_msg)
[rankN]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight
self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank5]: raise CheckpointingException(_msg) [rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank2]: Traceback (most recent call last): (RANK 30) [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank2]: local_data = map_fun() [rank2]: ^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank31]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank31]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank31]: raise CheckpointingException(_msg) [rank31]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096o.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank30]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank9]: result = func(*args, **kwargs) [rank9]: ^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank9]: local_plan = planner.create_local_plan() [rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank9]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank9]: raise CheckpointingException(_msg) [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank16]: raise CheckpointingException(_msg) [rank16]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank16]: Traceback (most recent call last): (RANK 21) [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank16]: local_data = map_fun() [rank16]: ^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank16]: result = 
func(*args, **kwargs) [rank16]: ^^^^^^^^^^^^^^^^^^^^^ [rank2]: result = func(*args, **kwargs) [rank2]: ^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank2]: local_plan = planner.create_local_plan() [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank2]: raise CheckpointingException(_msg) [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank30]: raise CheckpointingException(_msg) [rank30]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank30]: Traceback (most recent call last): (RANK 30) [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank30]: local_data = map_fun() [rank30]: ^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank30]: result = func(*args, **kwargs) [rank30]: ^^^^^^^^^^^^^^^^^^^^^ [rank9]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank9]: Traceback (most recent call last): (RANK 31) [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank9]: local_data = map_fun() [rank9]: ^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank9]: result = func(*args, **kwargs) [rank9]: ^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank9]: local_plan = planner.create_local_plan() [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank2]: Traceback (most recent call last): (RANK 31) [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in 
create_local_plan [rank9]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank9]: raise CheckpointingException(_msg) [rank9]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight n/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank13]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank16]: local_plan = planner.create_local_plan() [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatroo.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank18]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank18]: raise CheckpointingException(_msg) [rank18]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank18]: Traceback (most recent call last): (RANK 30) [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank2]: local_data = map_fun() [rank2]: ^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank2]: result = func(*args, **kwargs) [rank2]: ^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank2]: local_plan = planner.create_local_plan() [rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank30]: local_plan = planner.create_local_plan() [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank30]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank30]: raise CheckpointingException(_msg) [rank30]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank30]: Traceback (most recent call last): (RANK 31) [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in 
_validate_global_shapes [rank13]: raise CheckpointingException(_msg) [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank18]: local_data = map_fun() [rank18]: ^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank18]: result = func(*args, **kwargs) [rank18]: ^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank18]: local_plan = planner.create_local_plan() [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank2]: raise CheckpointingException(_msg) [rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight t/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank7]: result = func(*args, **kwargs) [rank7]: ^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank7]: local_plan = planner.create_local_plan() [rank30]: local_data = map_fun() [rank30]: ^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank30]: result = func(*args, **kwargs) [rank30]: ^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank30]: local_plan = planner.create_local_plan() [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank30]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank13]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank13]: Traceback (most recent call last): (RANK 22) [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank13]: local_data = map_fun() [rank13]: ^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank13]: result = func(*args, **kwargs) [rank13]: ^^^^^^^^^^^^^^^^^^^^^ [rank13]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank13]: local_plan = planner.create_local_plan() [rank18]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank18]: raise CheckpointingException(_msg) [rank18]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank18]: Traceback (most recent call last): (RANK 31) [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank18]: local_data = map_fun() [rank18]: ^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank7]: raise CheckpointingException(_msg) [rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank7]: Traceback (most recent call last): (RANK 28) [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank7]: local_data = map_fun() [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank30]: raise CheckpointingException(_msg) [rank30]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight o.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank28]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank28]: raise CheckpointingException(_msg) [rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank13]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank13]: raise CheckpointingException(_msg) [rank13]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape 
mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank13]: Traceback (most recent call last): (RANK 23) [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank13]: local_data = map_fun() [rank18]: result = func(*args, **kwargs) [rank18]: ^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank18]: local_plan = planner.create_local_plan() [rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank18]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank18]: raise CheckpointingException(_msg) [rank7]: ^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank7]: result = func(*args, **kwargs) [rank7]: ^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank7]: local_plan = planner.create_local_plan() [rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank28]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank28]: Traceback (most recent call last): (RANK 30) [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank28]: local_data = map_fun() [rank28]: ^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank28]: result = func(*args, **kwargs) [rank28]: ^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank28]: local_plan = planner.create_local_plan() [rank13]: ^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank13]: result = func(*args, **kwargs) [rank13]: ^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank13]: 
local_plan = planner.create_local_plan() [rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank13]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank18]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight o.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank7]: raise CheckpointingException(_msg) [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank28]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank13]: raise CheckpointingException(_msg) [rank13]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank13]: Traceback (most recent call last): (RANK 24) [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank13]: local_data = map_fun() [rank13]: ^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank13]: result = func(*args, **kwargs) [rank13]: ^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank17]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank17]: raise CheckpointingException(_msg) [rank17]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank17]: Traceback (most recent call last): (RANK 30) [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank17]: local_data = map_fun() [rank17]: ^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank7]: Traceback (most recent call last): (RANK 29) [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank7]: local_data = map_fun() 
[rank7]: ^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank7]: result = func(*args, **kwargs) [rank7]: ^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank7]: local_plan = planner.create_local_plan() [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank28]: raise CheckpointingException(_msg) [rank28]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank28]: Traceback (most recent call last): (RANK 31) [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank28]: local_data = map_fun() [rank28]: ^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank28]: result = func(*args, **kwargs) [rank28]: ^^^^^^^^^^^^^^^^^^^^^ [rank13]: local_plan = planner.create_local_plan() [rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank13]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank13]: raise CheckpointingException(_msg) [rank13]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank13]: Traceback (most recent call last): (RANK 25) [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank17]: result = func(*args, **kwargs) [rank17]: ^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank17]: local_plan = planner.create_local_plan() [rank17]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank17]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank17]: raise CheckpointingException(_msg) [rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank7]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank7]: raise CheckpointingException(_msg) [rank7]: megatron.core.dist_checkpointing.core.Checkpoint/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank1]: result = func(*args, **kwargs) [rank1]: ^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank28]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank28]: local_plan = planner.create_local_plan() [rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank28]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank28]: raise CheckpointingException(_msg) [rank28]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank13]: local_data = map_fun() [rank13]: ^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank13]: result = func(*args, **kwargs) [rank13]: ^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank13]: local_plan = planner.create_local_plan() [rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank13]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank17]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank17]: Traceback (most recent call last): (RANK 31) [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank17]: local_data = map_fun() [rank17]: ^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank17]: result = func(*args, **kwargs) [rank17]: ^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank17]: local_plan = planner.create_local_plan() [rank1]: local_plan = planner.create_local_plan() [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank1]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank1]: raise CheckpointingException(_msg) [rank1]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank1]: Traceback (most recent call last): (RANK 28) [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter o.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank26]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank26]: raise CheckpointingException(_msg) [rank26]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank26]: Traceback (most recent call last): (RANK 30) [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank26]: local_data = map_fun() [rank26]: ^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank13]: raise CheckpointingException(_msg) [rank13]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank13]: Traceback (most recent call last): (RANK 26) [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank13]: local_data = map_fun() [rank13]: ^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank13]: result = func(*args, **kwargs) [rank13]: ^^^^^^^^^^^^^^^^^^^^^ [rank17]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank17]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank17]: raise CheckpointingException(_msg) [rank17]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight r loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight 
[rank22]: Traceback (most recent call last): (RANK 27) [rank1]: local_data = map_fun() [rank1]: ^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank1]: result = func(*args, **kwargs) [rank1]: ^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank1]: local_plan = planner.create_local_plan() [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank1]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank26]: result = func(*args, **kwargs) [rank26]: ^^^^^^^^^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank26]: local_plan = planner.create_local_plan() [rank26]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank26]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank26]: raise CheckpointingException(_msg) [rank13]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank13]: local_plan = planner.create_local_plan() [rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank13]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank13]: raise CheckpointingException(_msg) [rank13]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch fo-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank12]: result = func(*args, **kwargs) [rank12]: ^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank22]: local_data = map_fun() [rank22]: ^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank1]: raise CheckpointingException(_msg) [rank26]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank26]: Traceback (most recent call last): (RANK 31) [rank26]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank26]: local_data = map_fun() [rank26]: ^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank26]: result = func(*args, **kwargs) [rank26]: ^^^^^^^^^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank26]: local_plan = planner.create_local_plan() [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank22]: result = func(*args, **kwargs) [rank22]: ^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank22]: local_plan = planner.create_local_plan() [rank22]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank22]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank22]: raise CheckpointingException(_msg) [rank1]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank1]: Traceback (most recent call last): (RANK 29) [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank1]: local_data = map_fun() [rank1]: ^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank1]: result = func(*args, **kwargs) [rank1]: ^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank1]: local_plan = planner.create_local_plan() [rank26]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank26]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank26]: raise CheckpointingException(_msg) [rank26]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight ])) and expected ((98304, 4096)) tensor for key 
embedding.position_embeddings.weight [rank31]: Traceback (most recent call last): (RANK 19) [rank12]: local_plan = planner.create_local_plan() [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank12]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank12]: raise CheckpointingException(_msg) [rank12]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank12]: Traceback (most recent call last): (RANK 25) [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank22]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank22]: Traceback (most recent call last): (RANK 28) [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank22]: local_data = map_fun() [rank22]: ^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank22]: result = func(*args, **kwargs) [rank22]: ^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank22]: local_plan = planner.create_local_plan() [rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank1]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank1]: raise CheckpointingException(_msg) [rank1]: megatron.core.dist_checkpointing.core.Checkpoinank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank3]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank31]: local_data = map_fun() [rank31]: ^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank31]: result = func(*args, **kwargs) [rank31]: ^^^^^^^^^^^^^^^^^^^^^ [rank31]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank31]: local_plan = 
[ranks 0, 1, 3, 5, 7, 10, 12, 13, 16, 22, 23, 31]: identical tracebacks (aggregated per-rank reports, RANK 20 through RANK 31) repeated many times in the interleaved output; one representative copy:
Traceback (most recent call last):
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter
    local_data = map_fun()
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper
    result = func(*args, **kwargs)
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step
    local_plan = planner.create_local_plan()
  File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan
    self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors)
  File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes
    raise CheckpointingException(_msg)
megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank12]: local_data = map_fun() [rank12]: ^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank12]: result = func(*args, **kwargs) [rank12]: ^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank12]: local_plan = planner.create_local_plan() [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank16]: raise CheckpointingException(_msg) [rank16]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank16]: Traceback (most recent call last): (RANK 29) [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank16]: local_data = map_fun() [rank16]: ^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank16]: result = func(*args, **kwargs) [rank16]: ^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank3]: local_data = map_fun() [rank3]: ^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank3]: result = func(*args, **kwargs) [rank3]: ^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank3]: local_plan = planner.create_local_plan() [rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank12]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank12]: raise CheckpointingException(_msg) [rank12]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank12]: Traceback (most recent call last): (RANK 31) [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank12]: local_data = map_fun() [rank12]: ^^^^^^^^^ [rank12]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank16]: local_plan = planner.create_local_plan() [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank16]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank16]: raise CheckpointingException(_msg) [rank16]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank3]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank3]: raise CheckpointingException(_msg) [rank3]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank3]: Traceback (most recent call last): (RANK 30) [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank3]: local_data = map_fun() [rank3]: ^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank12]: result = func(*args, **kwargs) [rank12]: ^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank12]: local_plan = planner.create_local_plan() [rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank12]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank12]: raise CheckpointingException(_msg) [rank16]: Traceback (most recent call last): (RANK 30) [rank3]: result = func(*args, **kwargs) [rank3]: ^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank3]: local_plan = planner.create_local_plan() [rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank3]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank3]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank3]: raise CheckpointingException(_msg) [rank12]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank16]: local_data = map_fun() [rank16]: ^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank16]: result = func(*args, **kwargs) [rank16]: ^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank16]: local_plan = planner.create_local_plan() [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank3]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank3]: Traceback (most recent call last): (RANK 31) [rank16]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank16]: raise CheckpointingException(_msg) [rank16]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank16]: Traceback (most recent call last): (RANK 31) [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank16]: local_data = map_fun() [rank16]: ^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter [rank3]: local_data = map_fun() [rank3]: ^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper [rank3]: result = func(*args, **kwargs) [rank3]: ^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank3]: local_plan = planner.create_local_plan() [rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank16]: result = func(*args, **kwargs) [rank16]: ^^^^^^^^^^^^^^^^^^^^^ [rank16]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step [rank16]: local_plan = planner.create_local_plan() [rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan [rank16]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank16]: raise CheckpointingException(_msg) [rank3]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes [rank3]: raise CheckpointingException(_msg) [rank3]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank16]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([8192, 4096])) and expected ((98304, 4096)) tensor for key embedding.position_embeddings.weight [rank15]:[W621 21:18:12.730366017 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank6]:[W621 21:18:12.854425315 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank7]:[W621 21:18:12.884689083 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank5]:[W621 21:18:12.892840203 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank18]:[W621 21:18:12.329414695 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank4]:[W621 21:18:12.006274222 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank2]:[W621 21:18:12.008063668 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank1]:[W621 21:18:12.023853646 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. 
For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank3]:[W621 21:18:13.049287473 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank20]:[W621 21:18:13.396143435 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank25]:[W621 21:18:13.471852888 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank0]:[W621 21:18:13.075051454 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank30]:[W621 21:18:13.520131058 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank27]:[W621 21:18:13.522251538 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank17]:[W621 21:18:13.454048138 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank31]:[W621 21:18:13.534307461 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank11]:[W621 21:18:13.011336068 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank14]:[W621 21:18:13.019226767 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank16]:[W621 21:18:13.485985854 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank28]:[W621 21:18:13.565394565 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank12]:[W621 21:18:13.044265744 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. 
For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank19]:[W621 21:18:13.494829984 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank29]:[W621 21:18:13.570449573 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank21]:[W621 21:18:13.501731796 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank23]:[W621 21:18:13.506286264 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank26]:[W621 21:18:13.579961778 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank10]:[W621 21:18:13.058338237 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank22]:[W621 21:18:13.522871829 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank9]:[W621 21:18:13.112113716 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank24]:[W621 21:18:13.635204984 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank13]:[W621 21:18:13.120021984 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank8]:[W621 21:18:13.425964397 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. 
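The failure is a checkpoint/configuration mismatch rather than a crash in the model code: the gpt-checkpoint directory on disk holds a position-embedding table with 8192 rows (apparently written by the earlier 8192-token run of this sweep), while this run evidently expects a 98304-token table, so Megatron's distributed-checkpointing shape validation refuses the load. Below is a minimal bash sketch of one way to avoid the collision; CKPT_DIR is a hypothetical name, and the real driver script reuses a single gpt-checkpoint directory (--save gpt-checkpoint --load gpt-checkpoint) for every context length.

# Sketch only: give every context length its own checkpoint directory so a
# resume never mixes position-embedding tables of different sizes.
CKPT_DIR="gpt-checkpoint-L${PROF_CTX_LENGTH}"   # e.g. gpt-checkpoint-L98304
mkdir -p "${CKPT_DIR}"
# ...then pass "--save ${CKPT_DIR} --load ${CKPT_DIR}" to the launch command,
# or simply remove the stale directory before changing --seq-length:
# rm -rf gpt-checkpoint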
W0621 21:18:13.804000 145950 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 146038 closing signal SIGTERM
[the four elastic agents (145950 on fs-mbz-gpu-852, 3365780 on fs-mbz-gpu-881, 3434539 on fs-mbz-gpu-870, 2065087 on fs-mbz-gpu-901) each sent the same closing signal to their seven surviving worker processes]
E0621 21:18:14.371000 145950 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 7 (pid: 146045) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
Traceback (most recent call last):
  File "<frozen runpy>", line 198, in _run_module_as_main
  File "<frozen runpy>", line 88, in _run_code
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in <module>
    main()
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
    return arg(*args, **kwargs)
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main
    launch(args)
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch
    run(args)
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run
    elastic_launch(
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__
    return launch_agent(self._config, self._entrypoint, list(args))
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent
    raise ChildFailedError(
torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
============================================================
./pretrain_gpt_profile.py FAILED
------------------------------------------------------------
Failures:
  <NO_OTHER_FAILURES>
------------------------------------------------------------
Root Cause (first observed failure):
[0]:
  time      : 2025-06-21_21:18:13
  host      : fs-mbz-gpu-852
  rank      : 7 (local_rank: 7)
  exitcode  : 1 (pid: 146045)
  error_file: <N/A>
  traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
============================================================
E0621 21:18:14.533000 3365780 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 2 (pid: 3365851) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
W0621 21:18:14.542000 3365780 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-881_3365780_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError.
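The summary above only says where to look for the worker traceback (https://pytorch.org/docs/stable/elastic/errors.html); the worker's own CheckpointingException is visible further up in this log but is not persisted anywhere. A hypothetical re-launch sketch that captures per-rank stdout/stderr to files; the --log-dir/--redirects/--tee spellings are assumptions to be checked against `torchrun --help` for this torch build, and the training flags are the ones already used in this log.

# Sketch: capture each rank's output under ./elastic-logs so the real traceback
# survives the SIGTERM fan-out (3 = redirect/tee both stdout and stderr).
torchrun --nproc_per_node 8 --nnodes 4 --node_rank 0 \
    --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 \
    --log-dir ./elastic-logs --redirects 3 --tee 3 \
    ./pretrain_gpt_profile.py  # ...training flags unchanged from the log above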
[W621 21:18:14.914299381 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-881]:60950, remote=[fs-mbz-gpu-852]:29500): Broken pipe
Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first):
frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x14ecb05785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
frame #1: + 0x5ba8afe (0x14ec9985aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
frame #2: + 0x5baa358 (0x14ec9985c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
frame #3: + 0x5babb3e (0x14ec9985db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
frame #4: c10d::TCPStore::doWait(c10::ArrayRef, std::allocator > >, std::chrono::duration >) + 0x1a6 (0x14ec99857ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string, std::allocator > const&) + 0x33 (0x14ec99857ea3 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
frame #6: c10d::TCPStore::get(std::__cxx11::basic_string, std::allocator > const&) + 0xab (0x14ec99858f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
frame #7: + 0xc0f526 (0x14eca8b8b526 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
frame #8: + 0x37f17d (0x14eca82fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
frame #26: + 0x29d90 (0x14ecb1899d90 in /lib/x86_64-linux-gnu/libc.so.6)
frame #27: __libc_start_main + 0x80 (0x14ecb1899e40 in /lib/x86_64-linux-gnu/libc.so.6)
[the same "Broken pipe" TCPStore trace was printed five more times: once more from fs-mbz-gpu-881 ([fs-mbz-gpu-881]:60950), twice from fs-mbz-gpu-870 ([fs-mbz-gpu-870]:35164) and twice from fs-mbz-gpu-901 ([fs-mbz-gpu-901]:57862), always against remote=[fs-mbz-gpu-852]:29500]
E0621 21:18:14.550000 3434539 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 3 (pid: 3434612) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
W0621 21:18:14.555000 3365780 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-881_3365780_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError.
W0621 21:18:14.559000 3434539 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-870_3434539_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError.
[each of the three remote agents retried the rendezvous shutdown and logged this RendezvousConnectionError warning two or three times]
Traceback (most recent call last):
  [same torch.distributed.launch / elastic_launch traceback as shown above for agent 145950]
torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
============================================================
./pretrain_gpt_profile.py FAILED
------------------------------------------------------------
Failures:
  <NO_OTHER_FAILURES>
------------------------------------------------------------
Root Cause (first observed failure):
[0]:
  time      : 2025-06-21_21:18:13
  host      : fs-mbz-gpu-881
  rank      : 18 (local_rank: 2)
  exitcode  : 1 (pid: 3365851)
  error_file: <N/A>
  traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
============================================================
Traceback (most recent call last):
  [same launcher traceback]
torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
============================================================
./pretrain_gpt_profile.py FAILED
------------------------------------------------------------
Failures:
  <NO_OTHER_FAILURES>
------------------------------------------------------------
Root Cause (first observed failure):
[0]:
  time      : 2025-06-21_21:18:13
  host      : fs-mbz-gpu-870
  rank      : 11 (local_rank: 3)
  exitcode  : 1 (pid: 3434612)
  error_file: <N/A>
  traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
============================================================
+ set +x
E0621 21:18:14.655000 2065087 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 1 (pid: 2065157) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
W0621 21:18:14.664000 2065087 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-901_2065087_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError.
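The Broken pipe and RendezvousConnectionError messages are secondary: the c10d TCPStore behind rendezvous '343200' listens on the --rdzv_endpoint host fs-mbz-gpu-852:29500, so once the agent on that host tore down after the checkpoint-load failure, the agents on the other three nodes could no longer reach the store to deregister. A quick reachability probe, illustrative only, using bash's /dev/tcp redirection with the host and port taken from the launch command in this log:

# Probe the c10d rendezvous endpoint; after the primary agent exits this port
# closes, which is exactly what produces the Broken pipe noise above.
if timeout 2 bash -c 'exec 3<>/dev/tcp/fs-mbz-gpu-852/29500' 2>/dev/null; then
  echo "rendezvous endpoint fs-mbz-gpu-852:29500 is reachable"
else
  echo "rendezvous endpoint fs-mbz-gpu-852:29500 is not reachable"
fi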
Traceback (most recent call last):
  File "<frozen runpy>", line 198, in _run_module_as_main
  File "<frozen runpy>", line 88, in _run_code
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in <module>
    main()
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
    return arg(*args, **kwargs)
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main
    launch(args)
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch
    run(args)
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run
    elastic_launch(
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__
    return launch_agent(self._config, self._entrypoint, list(args))
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent
    raise ChildFailedError(
torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
============================================================
./pretrain_gpt_profile.py FAILED
------------------------------------------------------------
Failures:
  <NO_OTHER_FAILURES>
------------------------------------------------------------
Root Cause (first observed failure):
[0]:
  time      : 2025-06-21_21:18:13
  host      : fs-mbz-gpu-901
  rank      : 25 (local_rank: 1)
  exitcode  : 1 (pid: 2065157)
  error_file: <N/A>
  traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
============================================================
+ set +x
+ set +x
+ set +x
+ for ctx_length in 1024 2048 4096 8192 12288 16384 24576 32768 40960 49152 65536 81920 98304 131072
+ export PROF_CTX_LENGTH=131072
+ PROF_CTX_LENGTH=131072
+ name='/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L131072*tp8.cp4.bs32.json'
+ '[' -f '/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L131072*tp8.cp4.bs32.json' ']'
+ echo 'Running ctx_length=131072, TP_SIZE=8, CP_SIZE=4, BATCH_SIZE=32'
+ srun bash ./attnserver.sh
+ which python3
+ which python3
+ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 1 --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 131072 --max-position-embeddings 131072 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
+ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 0 --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py [same training flags as the node_rank 1 command]
+ which python3
+ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 2 --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py [same training flags as the node_rank 1 command]
+ which python3
+ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 3 --rdzv_id 343200 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py [same training flags as the node_rank 1 command]
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated and will be removed in future. Use torchrun. Note that --use-env is set by default in torchrun. If your script expects `--local-rank` argument to be set, please change it to read from `os.environ['LOCAL_RANK']` instead. See https://pytorch.org/docs/stable/distributed.html#launch-utility for further instructions
  main()
[the same FutureWarning is printed by each of the four torch.distributed.launch processes as they start]
W0621 21:18:18.894000 2066939 site-packages/torch/distributed/run.py:766]
W0621 21:18:18.894000 2066939 site-packages/torch/distributed/run.py:766] *****************************************
W0621 21:18:18.894000 2066939 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
W0621 21:18:18.894000 2066939 site-packages/torch/distributed/run.py:766] *****************************************
W0621 21:18:18.897000 147855 site-packages/torch/distributed/run.py:766]
W0621 21:18:18.897000 147855 site-packages/torch/distributed/run.py:766] *****************************************
W0621 21:18:18.897000 147855 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
W0621 21:18:18.897000 147855 site-packages/torch/distributed/run.py:766] *****************************************
W0621 21:18:18.972000 3436378 site-packages/torch/distributed/run.py:766]
W0621 21:18:18.972000 3436378 site-packages/torch/distributed/run.py:766] *****************************************
W0621 21:18:18.972000 3436378 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
W0621 21:18:18.972000 3436378 site-packages/torch/distributed/run.py:766] *****************************************
W0621 21:18:18.988000 3367613 site-packages/torch/distributed/run.py:766]
W0621 21:18:18.988000 3367613 site-packages/torch/distributed/run.py:766] *****************************************
W0621 21:18:18.988000 3367613 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
W0621 21:18:18.988000 3367613 site-packages/torch/distributed/run.py:766] *****************************************
[W621 21:18:42 ProcessGroupNCCL.cpp:4715] Warning emitted once by each of ranks 0-31: "[PG ID 0 PG GUID 0 Rank N] using GPU <local GPU> as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can specify device_id in init_process_group() to force use of a particular device."
Reported rank -> GPU mapping (rank mod 8 on each node): rank 0->GPU 0, rank 1->GPU 1, rank 2->GPU 2, rank 3->GPU 3, rank 4->GPU 4, rank 5->GPU 5, rank 6->GPU 6, rank 7->GPU 7, rank 8->GPU 0, rank 9->GPU 1, rank 10->GPU 2, rank 11->GPU 3, rank 12->GPU 4, rank 13->GPU 5, rank 14->GPU 6, rank 15->GPU 7, rank 16->GPU 0, rank 17->GPU 1, rank 18->GPU 2, rank 19->GPU 3, rank 20->GPU 4, rank 21->GPU 5, rank 22->GPU 6, rank 23->GPU 7, rank 24->GPU 0, rank 25->GPU 1, rank 26->GPU 2, rank 27->GPU 3, rank 28->GPU 4, rank 29->GPU 5, rank 30->GPU 6, rank 31->GPU 7.
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
warnings.warn(
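The torchrun FutureWarning earlier (read LOCAL_RANK from the environment) and the ProcessGroupNCCL warning above (specify device_id) point at the same fix on the script side. The sketch below is illustrative only; it is not code from pretrain_gpt_profile.py and assumes a PyTorch release in which init_process_group accepts device_id:

import os
import torch
import torch.distributed as dist

# torchrun exports LOCAL_RANK for every worker, as the FutureWarning recommends.
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)

# Passing device_id pins the rank-to-GPU mapping explicitly, so ProcessGroupNCCL
# no longer warns that the device used by this process is unknown.
dist.init_process_group(
    backend="nccl",
    device_id=torch.device(f"cuda:{local_rank}"),
)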
[identical copies of the UserWarning above and of the DeprecationWarning below, emitted once per rank, omitted]
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
warnings.warn( [rank22]: Traceback (most recent call last): [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank22]: pretrain( [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank22]: iteration, num_floating_point_operations_so_far = train( [rank22]: ^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank22]: ) = train_step( [rank22]: ^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank22]: losses_reduced = forward_backward_func( [rank22]: ^^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank22]: output_tensor, num_tokens = forward_step( [rank22]: ^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank22]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank22]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank22]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank22]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank22]: batch = next(global_batches) [rank22]: ^^^^^^^^^^^^^^^^^^^^ [rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank22]: attention_mask = torch.ones( [rank22]: ^^^^^^^^^^^ [rank22]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 524288.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 131.15 GiB is free. Including non-PyTorch memory, this process has 8.65 GiB memory in use. Of the allocated memory 7.02 GiB is allocated by PyTorch, and 111.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank21]: Traceback (most recent call last): [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank21]: pretrain( [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank21]: iteration, num_floating_point_operations_so_far = train( [rank21]: ^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank21]: ) = train_step( [rank21]: ^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank21]: losses_reduced = forward_backward_func( [rank21]: ^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank21]: output_tensor, num_tokens = forward_step( [rank21]: ^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank21]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank21]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank21]: batch = next(global_batches) [rank21]: ^^^^^^^^^^^^^^^^^^^^ [rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank21]: attention_mask = torch.ones( [rank21]: ^^^^^^^^^^^ [rank21]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 524288.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 131.14 GiB is free. Including non-PyTorch memory, this process has 8.67 GiB memory in use. Of the allocated memory 7.02 GiB is allocated by PyTorch, and 111.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank19]: Traceback (most recent call last): [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank19]: pretrain( [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank19]: iteration, num_floating_point_operations_so_far = train( [rank19]: ^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank19]: ) = train_step( [rank19]: ^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank19]: losses_reduced = forward_backward_func( [rank19]: ^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank19]: output_tensor, num_tokens = forward_step( [rank19]: ^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank19]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank19]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: Traceback (most recent call last): [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank6]: pretrain( [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank6]: iteration, num_floating_point_operations_so_far = train( [rank6]: ^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank6]: ) = train_step( [rank6]: ^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank6]: losses_reduced = forward_backward_func( [rank6]: ^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank19]: batch = next(global_batches) [rank19]: ^^^^^^^^^^^^^^^^^^^^ [rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank19]: attention_mask = torch.ones( [rank19]: ^^^^^^^^^^^ [rank19]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 524288.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 131.14 GiB is free. Including non-PyTorch memory, this process has 8.67 GiB memory in use. Of the allocated memory 7.02 GiB is allocated by PyTorch, and 111.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank6]: output_tensor, num_tokens = forward_step( [rank6]: ^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank6]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank6]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank6]: batch = next(global_batches) [rank6]: ^^^^^^^^^^^^^^^^^^^^ [rank30]: Traceback (most recent call last): [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank30]: pretrain( [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank30]: iteration, num_floating_point_operations_so_far = train( [rank30]: ^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank30]: ) = train_step( [rank30]: ^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank30]: losses_reduced = forward_backward_func( [rank30]: ^^^^^^^^^^^^^^^^^^^^^^ [rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank6]: attention_mask = torch.ones( [rank6]: ^^^^^^^^^^^ [rank6]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 524288.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 131.17 GiB is free. Including non-PyTorch memory, this process has 8.63 GiB memory in use. Of the allocated memory 7.02 GiB is allocated by PyTorch, and 111.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank30]: output_tensor, num_tokens = forward_step( [rank30]: ^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank30]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank30]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank7]: Traceback (most recent call last): [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank7]: pretrain( [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank7]: iteration, num_floating_point_operations_so_far = train( [rank7]: ^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank7]: ) = train_step( [rank7]: ^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank7]: losses_reduced = forward_backward_func( [rank7]: ^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank30]: batch = next(global_batches) [rank30]: ^^^^^^^^^^^^^^^^^^^^ [rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank30]: attention_mask = torch.ones( [rank30]: ^^^^^^^^^^^ [rank30]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 524288.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 131.16 GiB is free. Including non-PyTorch memory, this process has 8.65 GiB memory in use. Of the allocated memory 7.02 GiB is allocated by PyTorch, and 111.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank7]: output_tensor, num_tokens = forward_step( [rank7]: ^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank7]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank7]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank7]: batch = next(global_batches) [rank7]: ^^^^^^^^^^^^^^^^^^^^ [rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank7]: attention_mask = torch.ones( [rank7]: ^^^^^^^^^^^ [rank7]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 524288.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 131.16 GiB is free. Including non-PyTorch memory, this process has 8.65 GiB memory in use. Of the allocated memory 7.02 GiB is allocated by PyTorch, and 111.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank3]: Traceback (most recent call last): [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank3]: pretrain( [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank3]: iteration, num_floating_point_operations_so_far = train( [rank3]: ^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank3]: ) = train_step( [rank3]: ^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank3]: losses_reduced = forward_backward_func( [rank3]: ^^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining [rank3]: output_tensor, num_tokens = forward_step( [rank3]: ^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank3]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank3]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank3]: batch = next(global_batches) [rank3]: ^^^^^^^^^^^^^^^^^^^^ [rank29]: Traceback (most recent call last): [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank29]: pretrain( [rank29]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank29]: iteration, num_floating_point_operations_so_far = train( [rank29]: ^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank29]: ) = train_step( [rank29]: ^^^^^^^^^^^ [rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank29]: losses_reduced = forward_backward_func( [rank29]: ^^^^^^^^^^^^^^^^^^^^^^ [rank14]: Traceback (most recent call last): [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in [rank14]: pretrain( [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain [rank14]: iteration, num_floating_point_operations_so_far = train( [rank14]: ^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train [rank14]: ) = train_step( [rank14]: ^^^^^^^^^^^ [rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step [rank14]: losses_reduced = forward_backward_func( [rank14]: ^^^^^^^^^^^^^^^^^^^^^^ [rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank3]: attention_mask = torch.ones( [rank3]: ^^^^^^^^^^^ [rank3]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 524288.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 131.16 GiB is free. Including non-PyTorch memory, this process has 8.65 GiB memory in use. Of the allocated memory 7.02 GiB is allocated by PyTorch, and 111.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
[rank0]: Traceback (most recent call last):
[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
[rank0]:     pretrain(
[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain
[rank0]:     iteration, num_floating_point_operations_so_far = train(
[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train
[rank0]:     ) = train_step(
[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step
[rank0]:     losses_reduced = forward_backward_func(
[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining
[rank0]:     output_tensor, num_tokens = forward_step(
[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step
[rank0]:     output_tensor, loss_func = forward_step_func(data_iterator, model)
[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step
[rank0]:     (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)
[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch
[rank0]:     batch = next(global_batches)
[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches
[rank0]:     attention_mask = torch.ones(
[rank0]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 524288.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 131.17 GiB is free. Including non-PyTorch memory, this process has 8.63 GiB memory in use. Of the allocated memory 7.02 GiB is allocated by PyTorch, and 111.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)

Ranks 1, 2, 4, 5, 8-18, 20, 23-29, and 31 report the identical traceback and torch.OutOfMemoryError: every process fails in setup_batches (pretrain_gpt_profile.py, line 226) while building the attention mask, each requesting 524288.00 GiB on its local GPU (139.81 GiB total capacity, roughly 131.1 GiB of it free and about 8.6 GiB in use per process at the time of the failure).
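The 524288.00 GiB figure is consistent with materializing a dense attention mask whose memory grows quadratically with the sequence length. The following back-of-the-envelope sketch uses assumed numbers (a 1-byte boolean mask, micro-batch size 2, and a 16,777,216-token sequence are illustrative guesses, not values read from this run's configuration) and is not the actual setup_batches code:

def attention_mask_gib(micro_batch, seq_len, bytes_per_element=1):
    # Size in GiB of a dense mask shaped (micro_batch, 1, seq_len, seq_len),
    # e.g. torch.ones((micro_batch, 1, seq_len, seq_len), dtype=torch.bool).
    return micro_batch * seq_len * seq_len * bytes_per_element / 2**30

# Hypothetical values that happen to reproduce the number in the error message:
print(attention_mask_gib(2, 16 * 1024 * 1024))  # -> 524288.0

Under any combination of this size the request dwarfs the 139.81 GiB of a single GPU, so the PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True hint in the message, which only reduces fragmentation of memory PyTorch has already reserved, would not change the outcome; a request this large can only be avoided by not materializing the full seq_len x seq_len mask on one device.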
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank9]: output_tensor, num_tokens = forward_step( [rank9]: ^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step [rank9]: output_tensor, loss_func = forward_step_func(data_iterator, model) [rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step [rank9]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) [rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch [rank9]: batch = next(global_batches) [rank9]: ^^^^^^^^^^^^^^^^^^^^ [rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches [rank9]: attention_mask = torch.ones( [rank9]: ^^^^^^^^^^^ [rank9]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 524288.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 131.15 GiB is free. Including non-PyTorch memory, this process has 8.65 GiB memory in use. Of the allocated memory 7.02 GiB is allocated by PyTorch, and 111.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) [rank3]:[W621 21:18:57.492985540 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank2]:[W621 21:18:57.514744847 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank5]:[W621 21:18:57.606304392 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank4]:[W621 21:18:57.612537187 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank7]:[W621 21:18:57.623552814 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank6]:[W621 21:18:57.628497419 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) [rank11]:[W621 21:18:57.516178219 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. 
[ranks 21, 22, 12, 10, 23, 17, 1, 14, 27, 31, 28, 30, 19, 15, 18, 29, 26, 13, 20, 9, 25]: identical ProcessGroupNCCL.cpp:1476 warning (destroy_process_group() was not called before program exit, which can leak resources; see https://pytorch.org/docs/stable/distributed.html#shutdown), emitted once per rank at 21:18:57.
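The ProcessGroupNCCL warnings above mean every worker exited (here via the OOM) without tearing down its process group. A minimal shutdown pattern, sketched here rather than taken from pretrain_gpt_profile.py, silences the warning by always calling destroy_process_group():

    # Sketch: guarantee NCCL process-group teardown even when training raises.
    import torch.distributed as dist

    def train():
        ...  # hypothetical stand-in for the real training loop (which raised the OOM above)

    def main():
        dist.init_process_group(backend="nccl")  # rendezvous via the torchrun-provided env://
        try:
            train()
        finally:
            if dist.is_initialized():
                dist.destroy_process_group()

    if __name__ == "__main__":
        main()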
W0621 21:18:58.751 2066939 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending processes 2067009-2067015 closing signal SIGTERM
W0621 21:18:58.775 147855 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending processes 147926-147930, 147933, 147934 closing signal SIGTERM
W0621 21:18:58.802 3436378 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending processes 3436449, 3436450, 3436452-3436456 closing signal SIGTERM
W0621 21:18:58.806 3367613 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending processes 3367683-3367687, 3367690 closing signal SIGTERM
E0621 21:18:59.071000 147855 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 5 (pid: 147931) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
Traceback (most recent call last):
  File "<frozen runpy>", line 198, in _run_module_as_main
  File "<frozen runpy>", line 88, in _run_code
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in <module>
    main()
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
    return arg(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main
    launch(args)
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch
    run(args)
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run
    elastic_launch(
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__
    return launch_agent(self._config, self._entrypoint, list(args))
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent
    raise ChildFailedError(
torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
============================================================
./pretrain_gpt_profile.py FAILED
------------------------------------------------------------
Failures:
  <NO_OTHER_FAILURES>
------------------------------------------------------------
Root Cause (first observed failure):
[0]:
  time      : 2025-06-21_21:18:58
  host      : fs-mbz-gpu-852
  rank      : 5 (local_rank: 5)
  exitcode  : 1 (pid: 147931)
  error_file: <N/A>
  traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
============================================================
E0621 21:18:59.184000 2066939 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 7 (pid: 2067016) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
E0621 21:18:59.185000 3436378 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 2 (pid: 3436451) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
W0621 21:18:59.195000 2066939 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-901_2066939_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError.
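The failure summary above leaves error_file and traceback empty because the worker entrypoint is not wrapped for elastic error propagation. The linked page (https://pytorch.org/docs/stable/elastic/errors.html) recommends decorating the entrypoint with @record so the agent can report the child's actual traceback; a sketch, where main is a hypothetical stand-in for the script's entrypoint:

    # Sketch: let torchrun populate the error_file / traceback fields on failure.
    from torch.distributed.elastic.multiprocessing.errors import record

    @record
    def main():
        ...  # hypothetical worker entrypoint, e.g. what pretrain_gpt_profile.py runs

    if __name__ == "__main__":
        main()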
[W621 21:18:59.635175327 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=4, addr=[fs-mbz-gpu-901]:54376, remote=[fs-mbz-gpu-852]:29500): Broken pipe
Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first):
frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >) + 0x98 (0x152a5c5785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
frame #1: <unknown function> + 0x5ba8afe (0x152a4545aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
frame #2: <unknown function> + 0x5baa358 (0x152a4545c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
frame #3: <unknown function> + 0x5babb3e (0x152a4545db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
frame #4: c10d::TCPStore::doWait(c10::ArrayRef<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > >, std::chrono::duration<long, std::ratio<1l, 1000l> >) + 0x1a6 (0x152a45457ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) + 0x33 (0x152a45457ea3 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
frame #6: c10d::TCPStore::get(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) + 0xab (0x152a45458f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
frame #7: <unknown function> + 0xc0f526 (0x152a5478b526 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
frame #8: <unknown function> + 0x37f17d (0x152a53efb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
frame #26: <unknown function> + 0x29d90 (0x152a5d5add90 in /lib/x86_64-linux-gnu/libc.so.6)
frame #27: __libc_start_main + 0x80 (0x152a5d5ade40 in /lib/x86_64-linux-gnu/libc.so.6)
[W621 21:18:59.114490531 TCPStore.cpp:115] [c10d] recvVector failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-870]:55556, remote=[fs-mbz-gpu-852]:29500): failed to recv, got 0 bytes
Exception raised from recvBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:678 (most recent call first):
[analogous stack, ending in c10d::TCPStore::compareSet rather than doGet/get]
[the same TCPStore sendBytes "Broken pipe" / recvVector "got 0 bytes" stacks, all against remote=[fs-mbz-gpu-852]:29500, were reprinted several times at 21:18:59 by the agents on fs-mbz-gpu-901 (addr :54376), fs-mbz-gpu-870 (addr :55556), and fs-mbz-gpu-881 (addr :36364)]
E0621 21:18:59.203000 3367613 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 5 (pid: 3367688) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
W0621 21:18:59.207000 2066939 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-901_2066939_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError.
W0621 21:18:59.208000 3436378 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-870_3436378_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError.
W0621 21:18:59.226000 3367613 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-881_3367613_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError.
[each node repeated the same rendezvous-shutdown warning two to three more times between 21:18:59.217 and 21:18:59.245]
[the agents on fs-mbz-gpu-901, fs-mbz-gpu-870, and fs-mbz-gpu-881 then printed the same torch.distributed.elastic ChildFailedError launcher traceback as above, with these failure summaries]
============================================================
./pretrain_gpt_profile.py FAILED
------------------------------------------------------------
Failures:
  <NO_OTHER_FAILURES>
------------------------------------------------------------
Root Cause (first observed failure):
[0]:
  time      : 2025-06-21_21:18:58
  host      : fs-mbz-gpu-901
  rank      : 31 (local_rank: 7)
  exitcode  : 1 (pid: 2067016)
  error_file: <N/A>
  traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
============================================================
============================================================
./pretrain_gpt_profile.py FAILED
------------------------------------------------------------
Failures:
  <NO_OTHER_FAILURES>
------------------------------------------------------------
Root Cause (first observed failure):
[0]:
  time      : 2025-06-21_21:18:58
  host      : fs-mbz-gpu-870
  rank      : 10 (local_rank: 2)
  exitcode  : 1 (pid: 3436451)
  error_file: <N/A>
  traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
============================================================
============================================================
./pretrain_gpt_profile.py FAILED
------------------------------------------------------------
Failures:
[1]:
  time      : 2025-06-21_21:18:58
  host      : fs-mbz-gpu-881
  rank      : 22 (local_rank: 6)
  exitcode  : 1 (pid: 3367689)
  error_file: <N/A>
  traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
------------------------------------------------------------
Root Cause (first observed failure):
[0]:
  time      : 2025-06-21_21:18:58
  host      : fs-mbz-gpu-881
  rank      : 21 (local_rank: 5)
  exitcode  : 1 (pid: 3367688)
  error_file: <N/A>
  traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
============================================================
+ set +x
+ set +x
+ set +x
+ set +x
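The TCPStore "Broken pipe" stacks and the failed rendezvous shutdowns above are secondary symptoms: the agent on fs-mbz-gpu-852, which hosts the c10d store at port 29500, exited first, so the remaining agents could no longer reach the store while cleaning up. A standalone illustration of that failure mode (not part of the job; host, port, and expected exception behavior are assumptions):

    # Sketch: client-side TCPStore calls fail once the server object is torn down,
    # mirroring the sendBytes/recvVector errors against [fs-mbz-gpu-852]:29500 above.
    from datetime import timedelta
    import torch.distributed as dist

    server = dist.TCPStore("127.0.0.1", 29501, is_master=True,
                           timeout=timedelta(seconds=5), wait_for_workers=False)
    client = dist.TCPStore("127.0.0.1", 29501, is_master=False,
                           timeout=timedelta(seconds=5))
    client.set("status", "ok")
    print(client.get("status"))   # b'ok' while the server is alive
    del server                    # tear the server down first, as happened on fs-mbz-gpu-852
    try:
        client.get("status")      # expected to fail with a connection / broken-pipe style error
    except Exception as exc:
        print(type(exc).__name__, exc)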