diff --git "a/attnserver.run_attnserver.slurm.sh.343199.err.log" "b/attnserver.run_attnserver.slurm.sh.343199.err.log" --- "a/attnserver.run_attnserver.slurm.sh.343199.err.log" +++ "b/attnserver.run_attnserver.slurm.sh.343199.err.log" @@ -63384,3 +63384,1278 @@ W0621 21:13:17.322000 1694572 site-packages/torch/distributed/run.py:766] W0621 21:13:17.322000 1694572 site-packages/torch/distributed/run.py:766] ***************************************** W0621 21:13:17.322000 1694572 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. W0621 21:13:17.322000 1694572 site-packages/torch/distributed/run.py:766] ***************************************** +[rank16]:[W621 21:13:41.286258329 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 16] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank8]:[W621 21:13:41.582695285 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 8] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank0]:[W621 21:13:41.775796237 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 0] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank24]:[W621 21:13:41.186237353 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 24] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank26]:[W621 21:13:41.195152027 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 26] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank2]:[W621 21:13:41.992563288 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 2] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank10]:[W621 21:13:41.873021919 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 10] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank18]:[W621 21:13:41.585106512 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 18] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank28]:[W621 21:13:41.197706351 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 28] using GPU 4 as device used by this process is currently unknown. 
This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank12]:[W621 21:13:41.876264247 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 12] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank15]:[W621 21:13:41.876413539 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 15] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank20]:[W621 21:13:41.587255901 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 20] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank31]:[W621 21:13:41.200194353 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 31] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank3]:[W621 21:13:41.997705442 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 3] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank11]:[W621 21:13:41.877770422 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 11] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank4]:[W621 21:13:41.998773898 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 4] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank27]:[W621 21:13:41.202018289 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 27] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank7]:[W621 21:13:41.001033977 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 7] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank23]:[W621 21:13:41.589641393 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 23] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank19]:[W621 21:13:41.589744195 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 19] using GPU 3 as device used by this process is currently unknown. 
This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank9]:[W621 21:13:41.881463938 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 9] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank1]:[W621 21:13:41.003583970 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 1] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank21]:[W621 21:13:41.594549224 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 21] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank13]:[W621 21:13:41.884444013 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 13] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank25]:[W621 21:13:41.207561925 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 25] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank29]:[W621 21:13:41.207644032 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 29] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank5]:[W621 21:13:41.007397732 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 5] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank6]:[W621 21:13:41.007791270 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 6] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank17]:[W621 21:13:41.598878338 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 17] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank14]:[W621 21:13:41.888874627 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 14] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank22]:[W621 21:13:41.600838580 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 22] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. 
You can pecify device_id in init_process_group() to force use of a particular device. +[rank30]:[W621 21:13:41.213520329 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 30] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. 
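Note on the ProcessGroupNCCL warnings above: each rank called init_process_group() before binding itself to a CUDA device, so NCCL cannot verify the rank-to-GPU mapping. A minimal sketch of the remedy the warning suggests, assuming torchrun's LOCAL_RANK environment variable and a PyTorch build whose init_process_group() accepts device_id (both assumptions, not read from this log):

    import os
    import torch
    import torch.distributed as dist

    local_rank = int(os.environ["LOCAL_RANK"])         # set by torchrun for each local worker
    torch.cuda.set_device(local_rank)                  # bind this process to its GPU before any collectives
    dist.init_process_group(
        backend="nccl",
        device_id=torch.device(f"cuda:{local_rank}"),  # tell NCCL the rank-to-GPU mapping explicitly
    )

Calling torch.cuda.set_device() before init_process_group(), or passing device_id explicitly, should make the device known to the process group; passing device_id can also let NCCL set up the communicator for that device eagerly.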
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+ warnings.warn(
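Note on the OutOfMemoryError tracebacks that follow: every rank fails in setup_batches while materializing a dense attention_mask with torch.ones, and the requested 65536.00 GiB (64 TiB) is consistent with a full [1, 1, s, s] boolean mask at a sequence length of s = 2**23 (8,388,608) tokens. The shape, dtype, and sequence length here are illustrative assumptions, not values read from pretrain_gpt_profile.py:

    import torch

    s = 2 ** 23                                            # assumed sequence length: 8,388,608 tokens
    elem = torch.ones(1, dtype=torch.bool).element_size()  # 1 byte per mask element
    mask_bytes = 1 * 1 * s * s * elem                      # dense [1, 1, s, s] mask
    print(mask_bytes / 2 ** 30, "GiB")                     # -> 65536.0 GiB

Because a dense mask grows quadratically with sequence length, no single 139.81 GiB GPU can hold it at this scale; the usual remedies are a shorter sequence or a causal/block mask generated inside the attention kernel rather than an explicit tensor.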
+ warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +[rank13]: Traceback (most recent call last): +[rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank13]: pretrain( +[rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank13]: iteration, num_floating_point_operations_so_far = train( +[rank13]: ^^^^^^ +[rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank13]: ) = train_step( +[rank13]: ^^^^^^^^^^^ +[rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank13]: losses_reduced = forward_backward_func( +[rank13]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: Traceback (most recent call last): +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank6]: pretrain( +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank6]: iteration, num_floating_point_operations_so_far = train( +[rank6]: ^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank6]: ) = train_step( +[rank6]: ^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank6]: losses_reduced = forward_backward_func( +[rank6]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank13]: output_tensor, num_tokens = forward_step( +[rank13]: ^^^^^^^^^^^^^ +[rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank13]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank13]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: output_tensor, num_tokens = forward_step( +[rank6]: ^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank6]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank6]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = 
get_batch(data_iterator) +[rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank6]: batch = next(global_batches) +[rank6]: ^^^^^^^^^^^^^^^^^^^^ +[rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank13]: batch = next(global_batches) +[rank13]: ^^^^^^^^^^^^^^^^^^^^ +[rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank13]: attention_mask = torch.ones( +[rank13]: ^^^^^^^^^^^ +[rank13]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 65536.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 132.15 GiB is free. Including non-PyTorch memory, this process has 7.65 GiB memory in use. Of the allocated memory 5.15 GiB is allocated by PyTorch, and 1007.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank6]: attention_mask = torch.ones( +[rank6]: ^^^^^^^^^^^ +[rank6]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 65536.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 132.17 GiB is free. Including non-PyTorch memory, this process has 7.63 GiB memory in use. Of the allocated memory 5.15 GiB is allocated by PyTorch, and 1007.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank12]: Traceback (most recent call last): +[rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank12]: pretrain( +[rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank12]: iteration, num_floating_point_operations_so_far = train( +[rank12]: ^^^^^^ +[rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank12]: ) = train_step( +[rank12]: ^^^^^^^^^^^ +[rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank12]: losses_reduced = forward_backward_func( +[rank12]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank1]: Traceback (most recent call last): +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank1]: pretrain( +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank1]: iteration, num_floating_point_operations_so_far = train( +[rank1]: ^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank1]: ) = train_step( +[rank1]: ^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank1]: losses_reduced = forward_backward_func( +[rank1]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank12]: output_tensor, num_tokens = forward_step( +[rank12]: ^^^^^^^^^^^^^ +[rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank1]: output_tensor, num_tokens = forward_step( +[rank1]: ^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank1]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank1]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank1]: batch = next(global_batches) +[rank1]: ^^^^^^^^^^^^^^^^^^^^ +[rank12]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank12]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank12]: batch = next(global_batches) +[rank12]: ^^^^^^^^^^^^^^^^^^^^ +[rank12]: 
File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank12]: attention_mask = torch.ones( +[rank12]: ^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank1]: attention_mask = torch.ones( +[rank1]: ^^^^^^^^^^^ +[rank1]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 65536.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 132.16 GiB is free. Including non-PyTorch memory, this process has 7.65 GiB memory in use. Of the allocated memory 5.15 GiB is allocated by PyTorch, and 1007.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank12]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 65536.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 132.14 GiB is free. Including non-PyTorch memory, this process has 7.67 GiB memory in use. Of the allocated memory 5.15 GiB is allocated by PyTorch, and 1007.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank14]: Traceback (most recent call last): +[rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank14]: pretrain( +[rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank14]: iteration, num_floating_point_operations_so_far = train( +[rank0]: Traceback (most recent call last): +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank0]: pretrain( +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank0]: iteration, num_floating_point_operations_so_far = train( +[rank0]: ^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank0]: ) = train_step( +[rank0]: ^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank0]: losses_reduced = forward_backward_func( +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank14]: ^^^^^^ +[rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank14]: ) = train_step( +[rank14]: ^^^^^^^^^^^ +[rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank14]: losses_reduced = forward_backward_func( +[rank14]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank14]: output_tensor, num_tokens = forward_step( +[rank14]: ^^^^^^^^^^^^^ +[rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 
289, in forward_step +[rank14]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank20]: Traceback (most recent call last): +[rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank20]: pretrain( +[rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank20]: iteration, num_floating_point_operations_so_far = train( +[rank20]: ^^^^^^ +[rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank20]: ) = train_step( +[rank20]: ^^^^^^^^^^^ +[rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank20]: losses_reduced = forward_backward_func( +[rank20]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: output_tensor, num_tokens = forward_step( +[rank0]: ^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank0]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank0]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank0]: batch = next(global_batches) +[rank0]: ^^^^^^^^^^^^^^^^^^^^ +[rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank14]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank14]: batch = next(global_batches) +[rank14]: ^^^^^^^^^^^^^^^^^^^^ +[rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank14]: attention_mask = torch.ones( +[rank14]: ^^^^^^^^^^^ +[rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank20]: output_tensor, num_tokens = forward_step( +[rank20]: ^^^^^^^^^^^^^ +[rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank20]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank20]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank28]: Traceback (most recent call last): +[rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank28]: pretrain( +[rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank28]: iteration, num_floating_point_operations_so_far = train( +[rank28]: ^^^^^^ +[rank28]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank28]: ) = train_step( +[rank28]: ^^^^^^^^^^^ +[rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank28]: losses_reduced = forward_backward_func( +[rank28]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank0]: attention_mask = torch.ones( +[rank0]: ^^^^^^^^^^^ +[rank0]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 65536.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 132.17 GiB is free. Including non-PyTorch memory, this process has 7.63 GiB memory in use. Of the allocated memory 5.15 GiB is allocated by PyTorch, and 1007.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank14]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 65536.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 132.14 GiB is free. Including non-PyTorch memory, this process has 7.67 GiB memory in use. Of the allocated memory 5.15 GiB is allocated by PyTorch, and 1007.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank11]: Traceback (most recent call last): +[rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank11]: pretrain( +[rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank11]: iteration, num_floating_point_operations_so_far = train( +[rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank20]: batch = next(global_batches) +[rank20]: ^^^^^^^^^^^^^^^^^^^^ +[rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank20]: attention_mask = torch.ones( +[rank20]: ^^^^^^^^^^^ +[rank20]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 65536.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 132.15 GiB is free. Including non-PyTorch memory, this process has 7.65 GiB memory in use. Of the allocated memory 5.15 GiB is allocated by PyTorch, and 1007.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank28]: output_tensor, num_tokens = forward_step( +[rank28]: ^^^^^^^^^^^^^ +[rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank28]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank28]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank11]: ^^^^^^ +[rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank11]: ) = train_step( +[rank11]: ^^^^^^^^^^^ +[rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank11]: losses_reduced = forward_backward_func( +[rank11]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank11]: output_tensor, num_tokens = forward_step( +[rank11]: ^^^^^^^^^^^^^ +[rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank11]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank22]: Traceback (most recent call last): +[rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank22]: pretrain( +[rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank22]: iteration, num_floating_point_operations_so_far = train( +[rank22]: ^^^^^^ +[rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank22]: ) = train_step( +[rank22]: ^^^^^^^^^^^ +[rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank22]: losses_reduced = forward_backward_func( +[rank22]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank28]: batch = next(global_batches) +[rank28]: ^^^^^^^^^^^^^^^^^^^^ +[rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank28]: attention_mask = torch.ones( +[rank28]: ^^^^^^^^^^^ +[rank28]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 65536.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 132.16 GiB is free. Including non-PyTorch memory, this process has 7.65 GiB memory in use. Of the allocated memory 5.15 GiB is allocated by PyTorch, and 1007.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank11]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank11]: batch = next(global_batches) +[rank11]: ^^^^^^^^^^^^^^^^^^^^ +[rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank11]: attention_mask = torch.ones( +[rank11]: ^^^^^^^^^^^ +[rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank22]: output_tensor, num_tokens = forward_step( +[rank22]: ^^^^^^^^^^^^^ +[rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank22]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank22]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank22]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank22]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank29]: Traceback (most recent call last): +[rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank29]: pretrain( +[rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank29]: iteration, num_floating_point_operations_so_far = train( +[rank29]: ^^^^^^ +[rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank29]: ) = train_step( +[rank29]: ^^^^^^^^^^^ +[rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank29]: losses_reduced = forward_backward_func( +[rank29]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank11]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 65536.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 132.15 GiB is free. Including non-PyTorch memory, this process has 7.65 GiB memory in use. Of the allocated memory 5.15 GiB is allocated by PyTorch, and 1007.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank22]: batch = next(global_batches) +[rank22]: ^^^^^^^^^^^^^^^^^^^^ +[rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank22]: attention_mask = torch.ones( +[rank22]: ^^^^^^^^^^^ +[rank22]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 65536.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 132.15 GiB is free. Including non-PyTorch memory, this process has 7.65 GiB memory in use. 
Of the allocated memory 5.15 GiB is allocated by PyTorch, and 1007.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank29]: output_tensor, num_tokens = forward_step( +[rank29]: ^^^^^^^^^^^^^ +[rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank15]: Traceback (most recent call last): +[rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank15]: pretrain( +[rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank15]: iteration, num_floating_point_operations_so_far = train( +[rank15]: ^^^^^^ +[rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank15]: ) = train_step( +[rank15]: ^^^^^^^^^^^ +[rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank15]: losses_reduced = forward_backward_func( +[rank15]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank21]: Traceback (most recent call last): +[rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank21]: pretrain( +[rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank21]: iteration, num_floating_point_operations_so_far = train( +[rank21]: ^^^^^^ +[rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank21]: ) = train_step( +[rank21]: ^^^^^^^^^^^ +[rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank21]: losses_reduced = forward_backward_func( +[rank21]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank29]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank29]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank29]: batch = next(global_batches) +[rank29]: ^^^^^^^^^^^^^^^^^^^^ +[rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank29]: attention_mask = torch.ones( +[rank29]: ^^^^^^^^^^^ +[rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank15]: output_tensor, num_tokens = forward_step( +[rank15]: ^^^^^^^^^^^^^ +[rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank15]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank15]: 
File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank15]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank21]: output_tensor, num_tokens = forward_step( +[rank21]: ^^^^^^^^^^^^^ +[rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank21]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank21]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank29]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 65536.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 132.17 GiB is free. Including non-PyTorch memory, this process has 7.63 GiB memory in use. Of the allocated memory 5.15 GiB is allocated by PyTorch, and 1007.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank30]: Traceback (most recent call last): +[rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank30]: pretrain( +[rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank30]: iteration, num_floating_point_operations_so_far = train( +[rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank15]: batch = next(global_batches) +[rank15]: ^^^^^^^^^^^^^^^^^^^^ +[rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank15]: attention_mask = torch.ones( +[rank15]: ^^^^^^^^^^^ +[rank15]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 65536.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 132.15 GiB is free. Including non-PyTorch memory, this process has 7.65 GiB memory in use. Of the allocated memory 5.15 GiB is allocated by PyTorch, and 1007.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank21]: batch = next(global_batches) +[rank21]: ^^^^^^^^^^^^^^^^^^^^ +[rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank21]: attention_mask = torch.ones( +[rank21]: ^^^^^^^^^^^ +[rank21]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 65536.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 132.14 GiB is free. Including non-PyTorch memory, this process has 7.67 GiB memory in use. 
Of the allocated memory 5.15 GiB is allocated by PyTorch, and 1007.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank30]: ^^^^^^ +[rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank30]: ) = train_step( +[rank30]: ^^^^^^^^^^^ +[rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank30]: losses_reduced = forward_backward_func( +[rank30]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank30]: output_tensor, num_tokens = forward_step( +[rank30]: ^^^^^^^^^^^^^ +[rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank30]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank23]: Traceback (most recent call last): +[rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank23]: pretrain( +[rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank23]: iteration, num_floating_point_operations_so_far = train( +[rank23]: ^^^^^^ +[rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank23]: ) = train_step( +[rank23]: ^^^^^^^^^^^ +[rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank23]: losses_reduced = forward_backward_func( +[rank23]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank30]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank30]: batch = next(global_batches) +[rank30]: ^^^^^^^^^^^^^^^^^^^^ +[rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank30]: attention_mask = torch.ones( +[rank30]: ^^^^^^^^^^^ +[rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank23]: output_tensor, num_tokens = forward_step( +[rank23]: ^^^^^^^^^^^^^ +[rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank30]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 65536.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 132.16 GiB is free. Including non-PyTorch memory, this process has 7.65 GiB memory in use. Of the allocated memory 5.15 GiB is allocated by PyTorch, and 1007.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
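Note: the PYTORCH_CUDA_ALLOC_CONF hint quoted in these messages addresses fragmentation of the caching allocator; it cannot help here, where the single request (65536 GiB) already exceeds total device memory. For completeness, a sketch of how that variable is usually set, before CUDA is initialized; the sbatch/launch script used for this run is not shown in this log:

import os

# Must be set before the first CUDA call in the process (or exported in the
# shell or sbatch script that launches the job).
os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "expandable_segments:True")

import torch  # imported after setting the env var on purpose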
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank31]: Traceback (most recent call last): +[rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank31]: pretrain( +[rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank31]: iteration, num_floating_point_operations_so_far = train( +[rank23]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank23]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank23]: batch = next(global_batches) +[rank23]: ^^^^^^^^^^^^^^^^^^^^ +[rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank23]: attention_mask = torch.ones( +[rank23]: ^^^^^^^^^^^ +[rank31]: ^^^^^^ +[rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank31]: ) = train_step( +[rank31]: ^^^^^^^^^^^ +[rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank31]: losses_reduced = forward_backward_func( +[rank31]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank31]: output_tensor, num_tokens = forward_step( +[rank31]: ^^^^^^^^^^^^^ +[rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank31]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank2]: Traceback (most recent call last): +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank2]: pretrain( +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank2]: iteration, num_floating_point_operations_so_far = train( +[rank2]: ^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank2]: ) = train_step( +[rank2]: ^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank2]: losses_reduced = forward_backward_func( +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank23]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 65536.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 132.14 GiB is free. Including non-PyTorch memory, this process has 7.67 GiB memory in use. Of the allocated memory 5.15 GiB is allocated by PyTorch, and 1007.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank17]: Traceback (most recent call last): +[rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank17]: pretrain( +[rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank17]: iteration, num_floating_point_operations_so_far = train( +[rank31]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank31]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank31]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank31]: batch = next(global_batches) +[rank31]: ^^^^^^^^^^^^^^^^^^^^ +[rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank31]: attention_mask = torch.ones( +[rank31]: ^^^^^^^^^^^ +[rank2]: output_tensor, num_tokens = forward_step( +[rank2]: ^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank2]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank2]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank2]: batch = next(global_batches) +[rank2]: ^^^^^^^^^^^^^^^^^^^^ +[rank17]: ^^^^^^ +[rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank17]: ) = train_step( +[rank17]: ^^^^^^^^^^^ +[rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank17]: losses_reduced = forward_backward_func( +[rank17]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank17]: output_tensor, num_tokens = forward_step( +[rank17]: ^^^^^^^^^^^^^ +[rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank17]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank31]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 65536.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 132.17 GiB is free. Including non-PyTorch memory, this process has 7.63 GiB memory in use. Of the allocated memory 5.15 GiB is allocated by PyTorch, and 1007.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank27]: Traceback (most recent call last): +[rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank27]: pretrain( +[rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank27]: iteration, num_floating_point_operations_so_far = train( +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank2]: attention_mask = torch.ones( +[rank2]: ^^^^^^^^^^^ +[rank2]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 65536.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 132.17 GiB is free. Including non-PyTorch memory, this process has 7.63 GiB memory in use. Of the allocated memory 5.15 GiB is allocated by PyTorch, and 1007.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank17]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank17]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank17]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank17]: batch = next(global_batches) +[rank17]: ^^^^^^^^^^^^^^^^^^^^ +[rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank17]: attention_mask = torch.ones( +[rank17]: ^^^^^^^^^^^ +[rank27]: ^^^^^^ +[rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank27]: ) = train_step( +[rank27]: ^^^^^^^^^^^ +[rank3]: Traceback (most recent call last): +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank3]: pretrain( +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank3]: iteration, num_floating_point_operations_so_far = train( +[rank3]: ^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank3]: ) = train_step( +[rank3]: ^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank3]: losses_reduced = forward_backward_func( +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank8]: Traceback (most recent call last): +[rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank8]: pretrain( +[rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank8]: iteration, num_floating_point_operations_so_far = train( +[rank8]: ^^^^^^ +[rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank8]: ) = 
train_step( +[rank8]: ^^^^^^^^^^^ +[rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank8]: losses_reduced = forward_backward_func( +[rank8]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank17]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 65536.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 132.14 GiB is free. Including non-PyTorch memory, this process has 7.67 GiB memory in use. Of the allocated memory 5.15 GiB is allocated by PyTorch, and 1007.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank16]: Traceback (most recent call last): +[rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank16]: pretrain( +[rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank16]: iteration, num_floating_point_operations_so_far = train( +[rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank27]: losses_reduced = forward_backward_func( +[rank27]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank27]: output_tensor, num_tokens = forward_step( +[rank27]: ^^^^^^^^^^^^^ +[rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank27]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank3]: output_tensor, num_tokens = forward_step( +[rank3]: ^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank3]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank3]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank3]: batch = next(global_batches) +[rank3]: ^^^^^^^^^^^^^^^^^^^^ +[rank8]: output_tensor, num_tokens = forward_step( +[rank8]: ^^^^^^^^^^^^^ +[rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank8]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank8]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) 
+[rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank8]: batch = next(global_batches) +[rank8]: ^^^^^^^^^^^^^^^^^^^^ +[rank16]: ^^^^^^ +[rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank16]: ) = train_step( +[rank16]: ^^^^^^^^^^^ +[rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank16]: losses_reduced = forward_backward_func( +[rank16]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank16]: output_tensor, num_tokens = forward_step( +[rank16]: ^^^^^^^^^^^^^ +[rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank16]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank27]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank27]: batch = next(global_batches) +[rank27]: ^^^^^^^^^^^^^^^^^^^^ +[rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank27]: attention_mask = torch.ones( +[rank27]: ^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank3]: attention_mask = torch.ones( +[rank3]: ^^^^^^^^^^^ +[rank3]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 65536.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 132.16 GiB is free. Including non-PyTorch memory, this process has 7.65 GiB memory in use. Of the allocated memory 5.15 GiB is allocated by PyTorch, and 1007.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank8]: attention_mask = torch.ones( +[rank8]: ^^^^^^^^^^^ +[rank8]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 65536.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 132.14 GiB is free. Including non-PyTorch memory, this process has 7.67 GiB memory in use. Of the allocated memory 5.15 GiB is allocated by PyTorch, and 1007.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank16]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank16]: batch = next(global_batches) +[rank16]: ^^^^^^^^^^^^^^^^^^^^ +[rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank16]: attention_mask = torch.ones( +[rank16]: ^^^^^^^^^^^ +[rank27]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 65536.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 132.17 GiB is free. Including non-PyTorch memory, this process has 7.63 GiB memory in use. Of the allocated memory 5.15 GiB is allocated by PyTorch, and 1007.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank5]: Traceback (most recent call last): +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank5]: pretrain( +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank5]: iteration, num_floating_point_operations_so_far = train( +[rank5]: ^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank5]: ) = train_step( +[rank5]: ^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank5]: losses_reduced = forward_backward_func( +[rank5]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank16]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 65536.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 132.15 GiB is free. Including non-PyTorch memory, this process has 7.65 GiB memory in use. Of the allocated memory 5.15 GiB is allocated by PyTorch, and 1007.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank25]: Traceback (most recent call last): +[rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank25]: pretrain( +[rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank25]: iteration, num_floating_point_operations_so_far = train( +[rank25]: ^^^^^^ +[rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank25]: ) = train_step( +[rank25]: ^^^^^^^^^^^ +[rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank25]: losses_reduced = forward_backward_func( +[rank25]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank5]: output_tensor, num_tokens = forward_step( +[rank5]: ^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank5]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank5]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank5]: batch = next(global_batches) +[rank5]: ^^^^^^^^^^^^^^^^^^^^ +[rank18]: Traceback (most recent call last): +[rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank18]: pretrain( +[rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank18]: iteration, num_floating_point_operations_so_far = train( +[rank18]: ^^^^^^ +[rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank18]: ) = train_step( +[rank18]: ^^^^^^^^^^^ +[rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank18]: losses_reduced = forward_backward_func( +[rank18]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank25]: output_tensor, num_tokens = forward_step( +[rank25]: ^^^^^^^^^^^^^ +[rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank25]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank25]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank5]: attention_mask = torch.ones( +[rank5]: ^^^^^^^^^^^ +[rank5]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 65536.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 132.16 GiB is free. 
Including non-PyTorch memory, this process has 7.65 GiB memory in use. Of the allocated memory 5.15 GiB is allocated by PyTorch, and 1007.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank7]: Traceback (most recent call last): +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank7]: pretrain( +[rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank18]: output_tensor, num_tokens = forward_step( +[rank18]: ^^^^^^^^^^^^^ +[rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank18]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank18]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank25]: batch = next(global_batches) +[rank25]: ^^^^^^^^^^^^^^^^^^^^ +[rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank25]: attention_mask = torch.ones( +[rank25]: ^^^^^^^^^^^ +[rank25]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 65536.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 132.17 GiB is free. Including non-PyTorch memory, this process has 7.63 GiB memory in use. Of the allocated memory 5.15 GiB is allocated by PyTorch, and 1007.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
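Note: the per-rank numbers repeated in these messages (total capacity, free memory, memory in use, allocated by PyTorch, reserved but unallocated) map onto standard torch.cuda accounting calls; a small sketch for cross-checking them on a failing rank:

import torch

if torch.cuda.is_available():
    dev = torch.cuda.current_device()
    free, total = torch.cuda.mem_get_info(dev)    # device-wide view, all processes
    allocated = torch.cuda.memory_allocated(dev)  # "allocated by PyTorch"
    reserved = torch.cuda.memory_reserved(dev)    # allocated plus cached segments
    gib = 2**30
    print(f"total={total / gib:.2f} GiB, free={free / gib:.2f} GiB")
    print(f"allocated={allocated / gib:.2f} GiB, "
          f"reserved-but-unallocated={(reserved - allocated) / gib:.2f} GiB")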
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank7]: iteration, num_floating_point_operations_so_far = train( +[rank7]: ^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank7]: ) = train_step( +[rank7]: ^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank7]: losses_reduced = forward_backward_func( +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank7]: output_tensor, num_tokens = forward_step( +[rank7]: ^^^^^^^^^^^^^ +[rank10]: Traceback (most recent call last): +[rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank10]: pretrain( +[rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank10]: iteration, num_floating_point_operations_so_far = train( +[rank10]: ^^^^^^ +[rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank10]: ) = train_step( +[rank10]: ^^^^^^^^^^^ +[rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank10]: losses_reduced = forward_backward_func( +[rank10]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank18]: batch = next(global_batches) +[rank18]: ^^^^^^^^^^^^^^^^^^^^ +[rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank18]: attention_mask = torch.ones( +[rank18]: ^^^^^^^^^^^ +[rank18]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 65536.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 132.15 GiB is free. Including non-PyTorch memory, this process has 7.65 GiB memory in use. Of the allocated memory 5.15 GiB is allocated by PyTorch, and 1007.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank24]: Traceback (most recent call last): +[rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank24]: pretrain( +[rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank24]: iteration, num_floating_point_operations_so_far = train( +[rank24]: ^^^^^^ +[rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank24]: ) = train_step( +[rank24]: ^^^^^^^^^^^ +[rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank24]: losses_reduced = forward_backward_func( +[rank24]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank10]: output_tensor, num_tokens = forward_step( +[rank10]: ^^^^^^^^^^^^^ +[rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank10]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank10]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank19]: Traceback (most recent call last): +[rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank19]: pretrain( +[rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank19]: iteration, num_floating_point_operations_so_far = train( +[rank19]: ^^^^^^ +[rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank19]: ) = train_step( +[rank19]: ^^^^^^^^^^^ +[rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank19]: losses_reduced = forward_backward_func( +[rank19]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank24]: output_tensor, num_tokens = forward_step( +[rank24]: ^^^^^^^^^^^^^ +[rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank24]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank24]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 
446, in forward_step +[rank7]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank7]: batch = next(global_batches) +[rank7]: ^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank7]: attention_mask = torch.ones( +[rank7]: ^^^^^^^^^^^ +[rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank10]: batch = next(global_batches) +[rank10]: ^^^^^^^^^^^^^^^^^^^^ +[rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank10]: attention_mask = torch.ones( +[rank10]: ^^^^^^^^^^^ +[rank10]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 65536.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 132.14 GiB is free. Including non-PyTorch memory, this process has 7.67 GiB memory in use. Of the allocated memory 5.15 GiB is allocated by PyTorch, and 1007.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank19]: output_tensor, num_tokens = forward_step( +[rank19]: ^^^^^^^^^^^^^ +[rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank19]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank19]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank24]: batch = next(global_batches) +[rank24]: ^^^^^^^^^^^^^^^^^^^^ +[rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank24]: attention_mask = torch.ones( +[rank24]: ^^^^^^^^^^^ +[rank24]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 65536.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 132.16 GiB is free. Including non-PyTorch memory, this process has 7.65 GiB memory in use. Of the allocated memory 5.15 GiB is allocated by PyTorch, and 1007.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank7]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 65536.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 132.16 GiB is free. Including non-PyTorch memory, this process has 7.65 GiB memory in use. 
Of the allocated memory 5.15 GiB is allocated by PyTorch, and 1007.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank4]: Traceback (most recent call last): +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank4]: pretrain( +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank4]: iteration, num_floating_point_operations_so_far = train( +[rank9]: Traceback (most recent call last): +[rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank9]: pretrain( +[rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank9]: iteration, num_floating_point_operations_so_far = train( +[rank9]: ^^^^^^ +[rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank9]: ) = train_step( +[rank9]: ^^^^^^^^^^^ +[rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank9]: losses_reduced = forward_backward_func( +[rank9]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank19]: batch = next(global_batches) +[rank19]: ^^^^^^^^^^^^^^^^^^^^ +[rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank19]: attention_mask = torch.ones( +[rank19]: ^^^^^^^^^^^ +[rank19]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 65536.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 132.14 GiB is free. Including non-PyTorch memory, this process has 7.67 GiB memory in use. Of the allocated memory 5.15 GiB is allocated by PyTorch, and 1007.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
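Note: one way to avoid allocations of this size is to never materialize the dense mask and let the attention kernel apply causality internally. Whether that fits the batch setup in pretrain_gpt_profile.py is an assumption, since the script itself is not part of this log; the sketch below only illustrates the general technique with toy shapes:

import torch
import torch.nn.functional as F

# Toy (batch, heads, seq, head_dim) shapes; the profiler's real shapes are unknown.
q = torch.randn(1, 8, 4096, 128)
k = torch.randn_like(q)
v = torch.randn_like(q)

# is_causal=True masks future positions inside the kernel, so no seq_len x seq_len
# attention_mask tensor (the torch.ones(...) that fails above) is ever allocated.
out = F.scaled_dot_product_attention(q, k, v, is_causal=True)
print(out.shape)  # torch.Size([1, 8, 4096, 128])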
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank26]: Traceback (most recent call last): +[rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank26]: pretrain( +[rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank26]: iteration, num_floating_point_operations_so_far = train( +[rank26]: ^^^^^^ +[rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank26]: ) = train_step( +[rank26]: ^^^^^^^^^^^ +[rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank26]: losses_reduced = forward_backward_func( +[rank26]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: ^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank4]: ) = train_step( +[rank4]: ^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank4]: losses_reduced = forward_backward_func( +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank4]: output_tensor, num_tokens = forward_step( +[rank4]: ^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank4]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank9]: output_tensor, num_tokens = forward_step( +[rank9]: ^^^^^^^^^^^^^ +[rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank9]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank9]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank9]: batch = next(global_batches) +[rank9]: ^^^^^^^^^^^^^^^^^^^^ +[rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank26]: output_tensor, num_tokens = forward_step( +[rank26]: ^^^^^^^^^^^^^ +[rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank26]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank26]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank26]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank26]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank4]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = 
get_batch(data_iterator) +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank4]: batch = next(global_batches) +[rank4]: ^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank4]: attention_mask = torch.ones( +[rank4]: ^^^^^^^^^^^ +[rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank9]: attention_mask = torch.ones( +[rank9]: ^^^^^^^^^^^ +[rank9]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 65536.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 132.15 GiB is free. Including non-PyTorch memory, this process has 7.65 GiB memory in use. Of the allocated memory 5.15 GiB is allocated by PyTorch, and 1007.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank26]: batch = next(global_batches) +[rank26]: ^^^^^^^^^^^^^^^^^^^^ +[rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank26]: attention_mask = torch.ones( +[rank26]: ^^^^^^^^^^^ +[rank26]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 65536.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 132.16 GiB is free. Including non-PyTorch memory, this process has 7.65 GiB memory in use. Of the allocated memory 5.15 GiB is allocated by PyTorch, and 1007.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank4]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 65536.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 132.17 GiB is free. Including non-PyTorch memory, this process has 7.63 GiB memory in use. Of the allocated memory 5.15 GiB is allocated by PyTorch, and 1007.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank1]:[W621 21:13:57.697817027 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank20]:[W621 21:13:57.313522756 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank3]:[W621 21:13:57.736561008 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. 
For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank15]:[W621 21:13:57.734106525 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank17]:[W621 21:13:57.455096818 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank28]:[W621 21:13:57.137679502 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank31]:[W621 21:13:57.137794289 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank12]:[W621 21:13:57.824870764 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank22]:[W621 21:13:57.548089793 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank25]:[W621 21:13:57.188618941 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank30]:[W621 21:13:57.201066496 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank9]:[W621 21:13:57.876172148 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank14]:[W621 21:13:57.878209844 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank27]:[W621 21:13:57.207376370 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank13]:[W621 21:13:57.887086184 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank29]:[W621 21:13:57.210246042 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. 
For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank23]:[W621 21:13:57.606091634 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank21]:[W621 21:13:57.608142121 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank18]:[W621 21:13:57.609076079 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank10]:[W621 21:13:57.909076420 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank11]:[W621 21:13:57.925214403 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank2]:[W621 21:13:57.055212854 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank19]:[W621 21:13:57.665935662 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank6]:[W621 21:13:57.100117754 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank4]:[W621 21:13:57.120711904 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank5]:[W621 21:13:57.138358627 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank7]:[W621 21:13:57.180699258 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank26]:[W621 21:13:57.453561742 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. 
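Note: the block of "destroy_process_group() was not called" warnings above is the usual side effect of ranks dying mid-step; on a clean exit the standard pattern is an explicit teardown, sketched below with a hypothetical entry point (the actual teardown in pretrain() is not visible in this log):

import torch.distributed as dist

def run_training():
    # hypothetical stand-in for the real training entry point
    pass

try:
    run_training()
finally:
    # Silences the ProcessGroupNCCL warning on normal exit; harmless if the
    # process group was never initialized because of an earlier failure.
    if dist.is_available() and dist.is_initialized():
        dist.destroy_process_group()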
+W0621 21:13:58.697000 2171712 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2171783 closing signal SIGTERM
+W0621 21:13:58.700000 2171712 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2171784 closing signal SIGTERM
+W0621 21:13:58.700000 2171712 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2171785 closing signal SIGTERM
+W0621 21:13:58.701000 2171712 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2171786 closing signal SIGTERM
+W0621 21:13:58.701000 2171712 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2171787 closing signal SIGTERM
+W0621 21:13:58.701000 2171712 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2171788 closing signal SIGTERM
+W0621 21:13:58.704000 1694572 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1694643 closing signal SIGTERM
+W0621 21:13:58.702000 2171712 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2171789 closing signal SIGTERM
+W0621 21:13:58.707000 1694572 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1694644 closing signal SIGTERM
+W0621 21:13:58.708000 1694572 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1694645 closing signal SIGTERM
+W0621 21:13:58.708000 1694572 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1694646 closing signal SIGTERM
+W0621 21:13:58.709000 1694572 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1694648 closing signal SIGTERM
+W0621 21:13:58.719000 1975666 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1975737 closing signal SIGTERM
+W0621 21:13:58.709000 1694572 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1694649 closing signal SIGTERM
+W0621 21:13:58.710000 1694572 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1694650 closing signal SIGTERM
+W0621 21:13:58.721000 1975666 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1975739 closing signal SIGTERM
+W0621 21:13:58.722000 1975666 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1975740 closing signal SIGTERM
+W0621 21:13:58.722000 1975666 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1975741 closing signal SIGTERM
+W0621 21:13:58.723000 1975666 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1975742 closing signal SIGTERM
+W0621 21:13:58.723000 1975666 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1975744 closing signal SIGTERM
+W0621 21:13:58.803000 712506 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 712577 closing signal SIGTERM
+W0621 21:13:58.805000 712506 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 712579 closing signal SIGTERM
+W0621 21:13:58.806000 712506 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 712582 closing signal SIGTERM
+W0621 21:13:58.807000 712506 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 712584 closing signal SIGTERM
+E0621 21:13:59.000000 1975666 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 1 (pid: 1975738) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
+Traceback (most recent call last):
+  File "<frozen runpy>", line 198, in _run_module_as_main
+  File "<frozen runpy>", line 88, in _run_code
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in <module>
+    main()
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
+    return arg(*args, **kwargs)
+           ^^^^^^^^^^^^^^^^^^^^
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main
+    launch(args)
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch
+    run(args)
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run
+    elastic_launch(
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__
+    return launch_agent(self._config, self._entrypoint, list(args))
+           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent
+    raise ChildFailedError(
+torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
+============================================================
+./pretrain_gpt_profile.py FAILED
+------------------------------------------------------------
+Failures:
+[1]:
+  time : 2025-06-21_21:13:58
+  host : fs-mbz-gpu-702
+  rank : 14 (local_rank: 6)
+  exitcode : 1 (pid: 1975743)
+  error_file: <N/A>
+  traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
+------------------------------------------------------------
+Root Cause (first observed failure):
+[0]:
+  time : 2025-06-21_21:13:58
+  host : fs-mbz-gpu-702
+  rank : 9 (local_rank: 1)
+  exitcode : 1 (pid: 1975738)
+  error_file: <N/A>
+  traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
+============================================================
+E0621 21:13:59.051000 1694572 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 4 (pid: 1694647) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
+Traceback (most recent call last):
+  File "<frozen runpy>", line 198, in _run_module_as_main
+  File "<frozen runpy>", line 88, in _run_code
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in <module>
+    main()
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
+    return arg(*args, **kwargs)
+           ^^^^^^^^^^^^^^^^^^^^
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main
+    launch(args)
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch
+    run(args)
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run
+    elastic_launch(
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__
+    return launch_agent(self._config, self._entrypoint, list(args))
+           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent
+    raise ChildFailedError(
+torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
+============================================================
+./pretrain_gpt_profile.py FAILED
+------------------------------------------------------------
+Failures:
+  <NO_OTHER_FAILURES>
+------------------------------------------------------------
+Root Cause (first observed failure):
+[0]:
+  time : 2025-06-21_21:13:58
+  host : fs-mbz-gpu-717
+  rank : 20 (local_rank: 4)
+  exitcode : 1 (pid: 1694647)
+  error_file: <N/A>
+  traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
+============================================================
+E0621 21:13:59.117000 712506 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 1 (pid: 712578) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
+Traceback (most recent call last):
+  File "<frozen runpy>", line 198, in _run_module_as_main
+  File "<frozen runpy>", line 88, in _run_code
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in <module>
+    main()
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
+    return arg(*args, **kwargs)
+           ^^^^^^^^^^^^^^^^^^^^
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main
+    launch(args)
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch
+    run(args)
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run
+    elastic_launch(
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__
+    return launch_agent(self._config, self._entrypoint, list(args))
+           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent
+    raise ChildFailedError(
+torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
+============================================================
+./pretrain_gpt_profile.py FAILED
+------------------------------------------------------------
+Failures:
+[1]:
+  time : 2025-06-21_21:13:58
+  host : fs-mbz-gpu-600
+  rank : 3 (local_rank: 3)
+  exitcode : 1 (pid: 712580)
+  error_file: <N/A>
+  traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
+[2]:
+  time : 2025-06-21_21:13:58
+  host : fs-mbz-gpu-600
+  rank : 6 (local_rank: 6)
+  exitcode : 1 (pid: 712583)
+  error_file: <N/A>
+  traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
+------------------------------------------------------------
+Root Cause (first observed failure):
+[0]:
+  time : 2025-06-21_21:13:58
+  host : fs-mbz-gpu-600
+  rank : 1 (local_rank: 1)
+  exitcode : 1 (pid: 712578)
+  error_file: <N/A>
+  traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
+============================================================
+E0621 21:13:59.180000 2171712 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 7 (pid: 2171790) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
+Traceback (most recent call last):
+  File "<frozen runpy>", line 198, in _run_module_as_main
+  File "<frozen runpy>", line 88, in _run_code
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in <module>
+    main()
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
+    return arg(*args, **kwargs)
+           ^^^^^^^^^^^^^^^^^^^^
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main
+    launch(args)
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch
+    run(args)
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run
+    elastic_launch(
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__
+    return launch_agent(self._config, self._entrypoint, list(args))
+           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent
+    raise ChildFailedError(
+torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
+============================================================
+./pretrain_gpt_profile.py FAILED
+------------------------------------------------------------
+Failures:
+  <NO_OTHER_FAILURES>
+------------------------------------------------------------
+Root Cause (first observed failure):
+[0]:
+  time : 2025-06-21_21:13:58
+  host : fs-mbz-gpu-768
+  rank : 31 (local_rank: 7)
+  exitcode : 1 (pid: 2171790)
+  error_file: <N/A>
+  traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
+============================================================
++ set +x
++ set +x
++ set +x
++ set +x
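
Note: every failure report above shows error_file: <N/A> and points to https://pytorch.org/docs/stable/elastic/errors.html because the worker entrypoint is not wrapped with the elastic error handler, so the launcher only sees the exit code, not the Python traceback. A hedged sketch of the fix that documentation describes is given here; the @record decorator and its import are the real torch.distributed.elastic API, while the main() body is illustrative only.

from torch.distributed.elastic.multiprocessing.errors import record

@record  # writes the worker's traceback to the error file the elastic agent reports
def main() -> None:
    # Any unhandled exception raised here would now show up in the "Failures" table
    # with a full traceback instead of just "exitcode : 1".
    raise RuntimeError("example failure")

if __name__ == "__main__":
    main()
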