Upload folder using huggingface_hub
- attnserver.run_attnserver.slurm.sh.343207.err.log +9 -0
- attnserver.run_attnserver.slurm.sh.343207.out.log +733 -0
- attnserver.run_attnserver.slurm.sh.343214.err.log +0 -0
- attnserver.run_attnserver.slurm.sh.343214.out.log +255 -0
- attnserver.run_attnserver.slurm.sh.343215.err.log +2 -2
- attnserver.run_attnserver.slurm.sh.343215.out.log +0 -0
- attnserver.run_attnserver.slurm.sh.343216.err.log +0 -0
- attnserver.run_attnserver.slurm.sh.343216.out.log +0 -0
- attnserver.run_attnserver.slurm.sh.343226.out.log +591 -0
- attnserver.run_attnserver.slurm.sh.343237.err.log +343 -0
- attnserver.run_attnserver.slurm.sh.343237.out.log +0 -0
- attnserver.run_attnserver.slurm.sh.343238.err.log +379 -0
- attnserver.run_attnserver.slurm.sh.343238.out.log +0 -0
- attnserver.run_attnserver.slurm.sh.343239.err.log +0 -0
- attnserver.run_attnserver.slurm.sh.343239.out.log +0 -0
- attnserver.run_attnserver.slurm.sh.343240.err.log +0 -0
- attnserver.run_attnserver.slurm.sh.343240.out.log +0 -0
- attnserver.run_attnserver.slurm.sh.343241.err.log +0 -0
- attnserver.run_attnserver.slurm.sh.343241.out.log +0 -0
- attnserver.run_attnserver.slurm.sh.343242.err.log +156 -0
- attnserver.run_attnserver.slurm.sh.343242.out.log +19 -0
- attnserver.run_attnserver.slurm.sh.343243.err.log +0 -0
- attnserver.run_attnserver.slurm.sh.343243.out.log +0 -0
- attnserver.run_attnserver.slurm.sh.343244.err.log +0 -0
- attnserver.run_attnserver.slurm.sh.343244.out.log +0 -0
- attnserver.run_attnserver.slurm.sh.343248.err.log +0 -0
- attnserver.run_attnserver.slurm.sh.343248.out.log +0 -0
- attnserver.run_attnserver.slurm.sh.343261.err.log +0 -0
- attnserver.run_attnserver.slurm.sh.343261.out.log +0 -0
- attnserver.run_attnserver.slurm.sh.343262.err.log +0 -0
- attnserver.run_attnserver.slurm.sh.343262.out.log +0 -0
- attnserver.run_attnserver.slurm.sh.343263.err.log +0 -0
- attnserver.run_attnserver.slurm.sh.343263.out.log +0 -0
- attnserver.run_attnserver.slurm.sh.343264.err.log +217 -0
- attnserver.run_attnserver.slurm.sh.343264.out.log +0 -0
attnserver.run_attnserver.slurm.sh.343207.err.log
CHANGED
@@ -5419,3 +5419,12 @@ W0621 21:57:15.431000 1545124 site-packages/torch/distributed/run.py:766] ******
     warnings.warn(
 /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
     warnings.warn(
+[rank7]:[W621 22:17:12.844696883 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+[rank4]:[W621 22:17:13.903036627 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+[rank3]:[W621 22:17:13.903781399 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+[rank6]:[W621 22:17:13.923039231 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+[rank5]:[W621 22:17:13.952421917 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+[rank2]:[W621 22:17:13.088350360 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+[rank1]:[W621 22:17:13.148366750 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+[rank0]:[W621 22:17:16.332550332 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
++ set +x
attnserver.run_attnserver.slurm.sh.343207.out.log
CHANGED
@@ -19691,3 +19691,736 @@ batch tensor after cp: labels torch.Size([1, 131072])
 batch tensor after cp: loss_mask torch.Size([1, 131072])
 batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
 batch tensor after cp: position_ids torch.Size([1, 131072])
+batch tensor: tokens torch.Size([1, 131072])
+batch tensor: labels torch.Size([1, 131072])
+batch tensor: loss_mask torch.Size([1, 131072])
+batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor: position_ids torch.Size([1, 131072])
+batch tensor after cp: tokens torch.Size([1, 131072])
+batch tensor after cp: labels torch.Size([1, 131072])
+batch tensor after cp: loss_mask torch.Size([1, 131072])
+batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor after cp: position_ids torch.Size([1, 131072])
+Start exporting trace 5
+Done exporting trace 5
+[2025-06-21 22:09:04] iteration 6/ 10 | consumed samples: 6 | elapsed time per iteration (ms): 125694.5 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 134217728.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([1, 131072])
+batch tensor: labels torch.Size([1, 131072])
+batch tensor: loss_mask torch.Size([1, 131072])
+batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor: position_ids torch.Size([1, 131072])
+batch tensor after cp: tokens torch.Size([1, 131072])
+batch tensor after cp: labels torch.Size([1, 131072])
+batch tensor after cp: loss_mask torch.Size([1, 131072])
+batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor after cp: position_ids torch.Size([1, 131072])
+batch tensor: tokens torch.Size([1, 131072])
+batch tensor: labels torch.Size([1, 131072])
+batch tensor: loss_mask torch.Size([1, 131072])
+batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor: position_ids torch.Size([1, 131072])
+batch tensor after cp: tokens torch.Size([1, 131072])
+batch tensor after cp: labels torch.Size([1, 131072])
+batch tensor after cp: loss_mask torch.Size([1, 131072])
+batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor after cp: position_ids torch.Size([1, 131072])
+batch tensor: tokens torch.Size([1, 131072])
+batch tensor: labels torch.Size([1, 131072])
+batch tensor: loss_mask torch.Size([1, 131072])
+batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor: position_ids torch.Size([1, 131072])
+batch tensor after cp: tokens torch.Size([1, 131072])
+batch tensor after cp: labels torch.Size([1, 131072])
+batch tensor after cp: loss_mask torch.Size([1, 131072])
+batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor after cp: position_ids torch.Size([1, 131072])
+batch tensor: tokens torch.Size([1, 131072])
+batch tensor: labels torch.Size([1, 131072])
+batch tensor: loss_mask torch.Size([1, 131072])
+batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor: position_ids torch.Size([1, 131072])
+batch tensor after cp: tokens torch.Size([1, 131072])
+batch tensor after cp: labels torch.Size([1, 131072])
+batch tensor after cp: loss_mask torch.Size([1, 131072])
+batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor after cp: position_ids torch.Size([1, 131072])
+batch tensor: tokens torch.Size([1, 131072])
+batch tensor: labels torch.Size([1, 131072])
+batch tensor: loss_mask torch.Size([1, 131072])
+batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor: position_ids torch.Size([1, 131072])
+batch tensor after cp: tokens torch.Size([1, 131072])
+batch tensor after cp: labels torch.Size([1, 131072])
+batch tensor after cp: loss_mask torch.Size([1, 131072])
+batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor after cp: position_ids torch.Size([1, 131072])
+batch tensor: tokens torch.Size([1, 131072])
+batch tensor: labels torch.Size([1, 131072])
+batch tensor: loss_mask torch.Size([1, 131072])
+batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor: position_ids torch.Size([1, 131072])
+batch tensor after cp: tokens torch.Size([1, 131072])
+batch tensor after cp: labels torch.Size([1, 131072])
+batch tensor after cp: loss_mask torch.Size([1, 131072])
+batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor after cp: position_ids torch.Size([1, 131072])
+batch tensor: tokens torch.Size([1, 131072])
+batch tensor: labels torch.Size([1, 131072])
+batch tensor: loss_mask torch.Size([1, 131072])
+batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor: position_ids torch.Size([1, 131072])
+batch tensor after cp: tokens torch.Size([1, 131072])
+batch tensor after cp: labels torch.Size([1, 131072])
+batch tensor after cp: loss_mask torch.Size([1, 131072])
+batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor after cp: position_ids torch.Size([1, 131072])
+batch tensor: tokens torch.Size([1, 131072])
+batch tensor: labels torch.Size([1, 131072])
+batch tensor: loss_mask torch.Size([1, 131072])
+batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor: position_ids torch.Size([1, 131072])
+batch tensor after cp: tokens torch.Size([1, 131072])
+batch tensor after cp: labels torch.Size([1, 131072])
+batch tensor after cp: loss_mask torch.Size([1, 131072])
+batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor after cp: position_ids torch.Size([1, 131072])
+Start exporting trace 6
+Done exporting trace 6
+[2025-06-21 22:11:10] iteration 7/ 10 | consumed samples: 7 | elapsed time per iteration (ms): 126612.6 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 67108864.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([1, 131072])
+batch tensor: labels torch.Size([1, 131072])
+batch tensor: loss_mask torch.Size([1, 131072])
+batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor: position_ids torch.Size([1, 131072])
+batch tensor after cp: tokens torch.Size([1, 131072])
+batch tensor after cp: labels torch.Size([1, 131072])
+batch tensor after cp: loss_mask torch.Size([1, 131072])
+batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor after cp: position_ids torch.Size([1, 131072])
+batch tensor: tokens torch.Size([1, 131072])
+batch tensor: labels torch.Size([1, 131072])
+batch tensor: loss_mask torch.Size([1, 131072])
+batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor: position_ids torch.Size([1, 131072])
+batch tensor after cp: tokens torch.Size([1, 131072])
+batch tensor after cp: labels torch.Size([1, 131072])
+batch tensor after cp: loss_mask torch.Size([1, 131072])
+batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor after cp: position_ids torch.Size([1, 131072])
+batch tensor: tokens torch.Size([1, 131072])
+batch tensor: labels torch.Size([1, 131072])
+batch tensor: loss_mask torch.Size([1, 131072])
+batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor: position_ids torch.Size([1, 131072])
+batch tensor after cp: tokens torch.Size([1, 131072])
+batch tensor after cp: labels torch.Size([1, 131072])
+batch tensor after cp: loss_mask torch.Size([1, 131072])
+batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor after cp: position_ids torch.Size([1, 131072])
+batch tensor: tokens torch.Size([1, 131072])
+batch tensor: labels torch.Size([1, 131072])
+batch tensor: loss_mask torch.Size([1, 131072])
+batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor: position_ids torch.Size([1, 131072])
+batch tensor after cp: tokens torch.Size([1, 131072])
+batch tensor after cp: labels torch.Size([1, 131072])
+batch tensor after cp: loss_mask torch.Size([1, 131072])
+batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor after cp: position_ids torch.Size([1, 131072])
+batch tensor: tokens torch.Size([1, 131072])
+batch tensor: labels torch.Size([1, 131072])
+batch tensor: loss_mask torch.Size([1, 131072])
+batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor: position_ids torch.Size([1, 131072])
+batch tensor after cp: tokens torch.Size([1, 131072])
+batch tensor after cp: labels torch.Size([1, 131072])
+batch tensor after cp: loss_mask torch.Size([1, 131072])
+batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor after cp: position_ids torch.Size([1, 131072])
+batch tensor: tokens torch.Size([1, 131072])
+batch tensor: labels torch.Size([1, 131072])
+batch tensor: loss_mask torch.Size([1, 131072])
+batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor: position_ids torch.Size([1, 131072])
+batch tensor after cp: tokens torch.Size([1, 131072])
+batch tensor after cp: labels torch.Size([1, 131072])
+batch tensor after cp: loss_mask torch.Size([1, 131072])
+batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor after cp: position_ids torch.Size([1, 131072])
+batch tensor: tokens torch.Size([1, 131072])
+batch tensor: labels torch.Size([1, 131072])
+batch tensor: loss_mask torch.Size([1, 131072])
+batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor: position_ids torch.Size([1, 131072])
+batch tensor after cp: tokens torch.Size([1, 131072])
+batch tensor after cp: labels torch.Size([1, 131072])
+batch tensor after cp: loss_mask torch.Size([1, 131072])
+batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor after cp: position_ids torch.Size([1, 131072])
+batch tensor: tokens torch.Size([1, 131072])
+batch tensor: labels torch.Size([1, 131072])
+batch tensor: loss_mask torch.Size([1, 131072])
+batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor: position_ids torch.Size([1, 131072])
+batch tensor after cp: tokens torch.Size([1, 131072])
+batch tensor after cp: labels torch.Size([1, 131072])
+batch tensor after cp: loss_mask torch.Size([1, 131072])
+batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor after cp: position_ids torch.Size([1, 131072])
+Start exporting trace 7
+Done exporting trace 7
+[2025-06-21 22:13:16] iteration 8/ 10 | consumed samples: 8 | elapsed time per iteration (ms): 126011.5 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 33554432.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([1, 131072])
+batch tensor: labels torch.Size([1, 131072])
+batch tensor: loss_mask torch.Size([1, 131072])
+batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor: position_ids torch.Size([1, 131072])
+batch tensor after cp: tokens torch.Size([1, 131072])
+batch tensor after cp: labels torch.Size([1, 131072])
+batch tensor after cp: loss_mask torch.Size([1, 131072])
+batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor after cp: position_ids torch.Size([1, 131072])
+batch tensor: tokens torch.Size([1, 131072])
+batch tensor: labels torch.Size([1, 131072])
+batch tensor: loss_mask torch.Size([1, 131072])
+batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor: position_ids torch.Size([1, 131072])
+batch tensor after cp: tokens torch.Size([1, 131072])
+batch tensor after cp: labels torch.Size([1, 131072])
+batch tensor after cp: loss_mask torch.Size([1, 131072])
+batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor after cp: position_ids torch.Size([1, 131072])
+batch tensor: tokens torch.Size([1, 131072])
+batch tensor: labels torch.Size([1, 131072])
+batch tensor: loss_mask torch.Size([1, 131072])
+batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor: position_ids torch.Size([1, 131072])
+batch tensor after cp: tokens torch.Size([1, 131072])
+batch tensor after cp: labels torch.Size([1, 131072])
+batch tensor after cp: loss_mask torch.Size([1, 131072])
+batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor after cp: position_ids torch.Size([1, 131072])
+batch tensor: tokens torch.Size([1, 131072])
+batch tensor: labels torch.Size([1, 131072])
+batch tensor: loss_mask torch.Size([1, 131072])
+batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor: position_ids torch.Size([1, 131072])
+batch tensor after cp: tokens torch.Size([1, 131072])
+batch tensor after cp: labels torch.Size([1, 131072])
+batch tensor after cp: loss_mask torch.Size([1, 131072])
+batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor after cp: position_ids torch.Size([1, 131072])
+batch tensor: tokens torch.Size([1, 131072])
+batch tensor: labels torch.Size([1, 131072])
+batch tensor: loss_mask torch.Size([1, 131072])
+batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor: position_ids torch.Size([1, 131072])
+batch tensor after cp: tokens torch.Size([1, 131072])
+batch tensor after cp: labels torch.Size([1, 131072])
+batch tensor after cp: loss_mask torch.Size([1, 131072])
+batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor after cp: position_ids torch.Size([1, 131072])
+batch tensor: tokens torch.Size([1, 131072])
+batch tensor: labels torch.Size([1, 131072])
+batch tensor: loss_mask torch.Size([1, 131072])
+batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor: position_ids torch.Size([1, 131072])
+batch tensor after cp: tokens torch.Size([1, 131072])
+batch tensor after cp: labels torch.Size([1, 131072])
+batch tensor after cp: loss_mask torch.Size([1, 131072])
+batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor after cp: position_ids torch.Size([1, 131072])
+batch tensor: tokens torch.Size([1, 131072])
+batch tensor: labels torch.Size([1, 131072])
+batch tensor: loss_mask torch.Size([1, 131072])
+batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor: position_ids torch.Size([1, 131072])
+batch tensor after cp: tokens torch.Size([1, 131072])
+batch tensor after cp: labels torch.Size([1, 131072])
+batch tensor after cp: loss_mask torch.Size([1, 131072])
+batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor after cp: position_ids torch.Size([1, 131072])
+batch tensor: tokens torch.Size([1, 131072])
+batch tensor: labels torch.Size([1, 131072])
+batch tensor: loss_mask torch.Size([1, 131072])
+batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor: position_ids torch.Size([1, 131072])
+batch tensor after cp: tokens torch.Size([1, 131072])
+batch tensor after cp: labels torch.Size([1, 131072])
+batch tensor after cp: loss_mask torch.Size([1, 131072])
+batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor after cp: position_ids torch.Size([1, 131072])
+Start exporting trace 8
+Done exporting trace 8
+[2025-06-21 22:14:39] iteration 9/ 10 | consumed samples: 9 | elapsed time per iteration (ms): 82690.3 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 16777216.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([1, 131072])
+batch tensor: labels torch.Size([1, 131072])
+batch tensor: loss_mask torch.Size([1, 131072])
+batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor: position_ids torch.Size([1, 131072])
+batch tensor after cp: tokens torch.Size([1, 131072])
+batch tensor after cp: labels torch.Size([1, 131072])
+batch tensor after cp: loss_mask torch.Size([1, 131072])
+batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor after cp: position_ids torch.Size([1, 131072])
+batch tensor: tokens torch.Size([1, 131072])
+batch tensor: labels torch.Size([1, 131072])
+batch tensor: loss_mask torch.Size([1, 131072])
+batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor: position_ids torch.Size([1, 131072])
+batch tensor after cp: tokens torch.Size([1, 131072])
+batch tensor after cp: labels torch.Size([1, 131072])
+batch tensor after cp: loss_mask torch.Size([1, 131072])
+batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor after cp: position_ids torch.Size([1, 131072])
+batch tensor: tokens torch.Size([1, 131072])
+batch tensor: labels torch.Size([1, 131072])
+batch tensor: loss_mask torch.Size([1, 131072])
+batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor: position_ids torch.Size([1, 131072])
+batch tensor after cp: tokens torch.Size([1, 131072])
+batch tensor after cp: labels torch.Size([1, 131072])
+batch tensor after cp: loss_mask torch.Size([1, 131072])
+batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor after cp: position_ids torch.Size([1, 131072])
+batch tensor: tokens torch.Size([1, 131072])
+batch tensor: labels torch.Size([1, 131072])
+batch tensor: loss_mask torch.Size([1, 131072])
+batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor: position_ids torch.Size([1, 131072])
+batch tensor after cp: tokens torch.Size([1, 131072])
+batch tensor after cp: labels torch.Size([1, 131072])
+batch tensor after cp: loss_mask torch.Size([1, 131072])
+batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor after cp: position_ids torch.Size([1, 131072])
+batch tensor: tokens torch.Size([1, 131072])
+batch tensor: labels torch.Size([1, 131072])
+batch tensor: loss_mask torch.Size([1, 131072])
+batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor: position_ids torch.Size([1, 131072])
+batch tensor after cp: tokens torch.Size([1, 131072])
+batch tensor after cp: labels torch.Size([1, 131072])
+batch tensor after cp: loss_mask torch.Size([1, 131072])
+batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor after cp: position_ids torch.Size([1, 131072])
+batch tensor: tokens torch.Size([1, 131072])
+batch tensor: labels torch.Size([1, 131072])
+batch tensor: loss_mask torch.Size([1, 131072])
+batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor: position_ids torch.Size([1, 131072])
+batch tensor after cp: tokens torch.Size([1, 131072])
+batch tensor after cp: labels torch.Size([1, 131072])
+batch tensor after cp: loss_mask torch.Size([1, 131072])
+batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor after cp: position_ids torch.Size([1, 131072])
+batch tensor: tokens torch.Size([1, 131072])
+batch tensor: labels torch.Size([1, 131072])
+batch tensor: loss_mask torch.Size([1, 131072])
+batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor: position_ids torch.Size([1, 131072])
+batch tensor after cp: tokens torch.Size([1, 131072])
+batch tensor after cp: labels torch.Size([1, 131072])
+batch tensor after cp: loss_mask torch.Size([1, 131072])
+batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor after cp: position_ids torch.Size([1, 131072])
+batch tensor: tokens torch.Size([1, 131072])
+batch tensor: labels torch.Size([1, 131072])
+batch tensor: loss_mask torch.Size([1, 131072])
+batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor: position_ids torch.Size([1, 131072])
+batch tensor after cp: tokens torch.Size([1, 131072])
+batch tensor after cp: labels torch.Size([1, 131072])
+batch tensor after cp: loss_mask torch.Size([1, 131072])
+batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor after cp: position_ids torch.Size([1, 131072])
+Start exporting trace 9
+Done exporting trace 9
+[2025-06-21 22:15:21] iteration 10/ 10 | consumed samples: 10 | elapsed time per iteration (ms): 42240.1 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 8388608.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+[after training is done] datetime: 2025-06-21 22:15:21
+saving checkpoint at iteration 10 to gpt-checkpoint in torch_dist format
+DEBUG:megatron.training.checkpointing:rank: 6, takes 0.029944181442260742 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 4, takes 0.029944896697998047 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 7, takes 0.02997756004333496 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 5, takes 0.029980897903442383 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 3, takes 0.030018329620361328 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 2, takes 0.032994747161865234 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 1, takes 0.03483152389526367 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 0, takes 0.04432272911071777 to prepare state dict for ckpt
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 16.754762649536133
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 16.755059242248535
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 16.754767894744873
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 16.755425691604614
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 16.75556445121765
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 16.755885124206543
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 16.75555682182312
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 0.008649587631225586
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 6, starting state dict save
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 5, starting state dict save
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 2, starting state dict save
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 7, starting state dict save
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 3, starting state dict save
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 4, starting state dict save
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 1, starting state dict save
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 0, starting state dict save
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 5, plan time: 0.00948190689086914
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 4, plan time: 0.008324623107910156
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750544139.342456
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 6, plan time: 0.009523391723632812
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 7, plan time: 0.009513616561889648
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750544139.3424892
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750544139.3424945
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750544139.3424995
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 2, plan time: 0.009567499160766602
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 1, plan time: 0.007208824157714844
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750544139.3425388
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750544139.3425443
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 0.00010204315185546875
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 0.00010132789611816406
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 0.00010085105895996094
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 0.00010538101196289062
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 3, plan time: 0.009640932083129883
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 0.00010156631469726562
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750544139.3426392
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 0.00010824203491210938
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 9.655952453613281e-05
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 0, plan time: 0.009952783584594727
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750544139.3492665
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 0.00011038780212402344
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05514168739318848
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544139.3981066 rank: 6, write(async) time: 0.05561351776123047
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.0555727481842041
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05548429489135742
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.055655479431152344
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.055699825286865234
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544139.3985298 rank: 4, write(async) time: 0.05603933334350586
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544139.3985932 rank: 3, write(async) time: 0.05595111846923828
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544139.3986025 rank: 7, write(async) time: 0.056096792221069336
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544139.3986292 rank: 5, write(async) time: 0.05617260932922363
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05745339393615723
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544139.4004755 rank: 2, write(async) time: 0.057933807373046875
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05948781967163086
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544139.4025002 rank: 1, write(async) time: 0.05995345115661621
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.0785679817199707
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544139.4283192 rank: 0, write(async) time: 0.07905006408691406
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, takes 1.71661376953125e-05 to finish D2H
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, takes 1.8596649169921875e-05 to finish D2H
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, takes 1.7881393432617188e-05 to finish D2H
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, takes 1.7404556274414062e-05 to finish D2H
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, takes 4.744529724121094e-05 to finish D2H
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, takes 1.6689300537109375e-05 to finish D2H
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, takes 1.6927719116210938e-05 to finish D2H
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, takes 0.02756810188293457 to schedule async ckpt
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, takes 0.02429986000061035 to schedule async ckpt
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, takes 0.027829885482788086 to schedule async ckpt
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, takes 0.025727033615112305 to schedule async ckpt
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, takes 0.027070045471191406 to schedule async ckpt
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, takes 0.03217744827270508 to schedule async ckpt
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, takes 0.03220176696777344 to schedule async ckpt
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results...
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results...
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results...
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results...
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results...
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results...
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results...
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 216584192, before: 1609834496, after: 1826418688
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 214532096, before: 1611698176, after: 1826230272
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 216518656, before: 1611857920, after: 1828376576
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 214421504, before: 1609834496, after: 1824256000
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 216530944, before: 1612681216, after: 1829212160
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 214511616, before: 1633198080, after: 1847709696
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 216510464, before: 1633198080, after: 1849708544
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 214740992, before: 1612681216, after: 1827422208
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 214638592, before: 1642680320, after: 1857318912
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 214769664, before: 1613099008, after: 1827868672
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750544140.5652304, rank: 1, write(sync,parallel): 0.9489068984985352
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 216535040, before: 1611698176, after: 1828233216
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750544140.5889492, rank: 5, write(sync,parallel): 0.9763185977935791
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750544140.5965796, rank: 2, write(sync,parallel): 0.9779596328735352
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750544140.605897, rank: 7, write(sync,parallel): 0.9978861808776855
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 216559616, before: 1613099008, after: 1829658624
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 214691840, before: 1611857920, after: 1826549760
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750544140.6461089, rank: 3, write(sync,parallel): 1.0371227264404297
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 216494080, before: 1642680320, after: 1859174400
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750544140.6598027, rank: 6, write(sync,parallel): 1.0446600914001465
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750544140.689988, rank: 4, write(sync,parallel): 1.0734827518463135
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, takes 1.4543533325195312e-05 to finish D2H
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, takes 0.03906750679016113 to schedule async ckpt
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, joining self.process
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, joining self.process
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, joining self.process
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, joining self.process
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 1.36s from forking
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, joining self.process
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 1.36s from forking
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, joining self.process
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 1.36s from forking
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 1.36s from forking
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 1.37s from forking
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 1.36s from forking
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, joining self.process
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 1.36s from forking
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, joining self.process
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results...
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 1500389376, before: 1894322176, after: 3394711552
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 2152157184, before: 1894322176, after: 4046479360
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750544148.107441, rank: 0, write(sync,parallel): 7.164879322052002
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 7.25s from forking
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544148.1547318, 1, gather: 7.2114081382751465
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544148.1548955, 5, gather: 7.211618661880493
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544148.1549287, 3, gather: 7.212156534194946
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544148.1549501, 4, gather: 7.2118775844573975
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544148.1549723, 6, gather: 7.211687326431274
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544148.1550899, 7, gather: 7.2117626667022705
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544148.155051, 2, gather: 7.209801912307739
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544148.1568513, 0, gather: 0.0050885677337646484
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544148.1725032, metadata_write: 0.01547551155090332
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.0234s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 7.2316s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 7.2317s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 7.2316s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 7.2318s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 7.2318s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 7.2317s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 7.2310s
+successfully saved checkpoint from iteration 10 to gpt-checkpoint [ t 1/8, p 1/1 ]
+DEBUG:megatron.training.checkpointing:rank: 5, takes 0.0024557113647460938 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 7, takes 0.0024611949920654297 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 1, takes 0.002426624298095703 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 6, takes 0.002448558807373047 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 3, takes 0.002458810806274414 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 0, takes 0.002514362335205078 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 4, takes 0.0024890899658203125 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 2, takes 0.0023038387298583984 to finalize ckpt save
+WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED
20244 |
+
Evaluating on 1 samples
|
20245 |
+
Evaluating iter 1/1
|
20246 |
+
batch tensor: tokens torch.Size([1, 131072])
|
20247 |
+
batch tensor: labels torch.Size([1, 131072])
|
20248 |
+
batch tensor: loss_mask torch.Size([1, 131072])
|
20249 |
+
batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
|
20250 |
+
batch tensor: position_ids torch.Size([1, 131072])
|
20251 |
+
batch tensor after cp: tokens torch.Size([1, 131072])
|
20252 |
+
batch tensor after cp: labels torch.Size([1, 131072])
|
20253 |
+
batch tensor after cp: loss_mask torch.Size([1, 131072])
|
20254 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
|
20255 |
+
batch tensor after cp: position_ids torch.Size([1, 131072])
|
20256 |
+
batch tensor: tokens torch.Size([1, 131072])
|
20257 |
+
batch tensor: labels torch.Size([1, 131072])
|
20258 |
+
batch tensor: loss_mask torch.Size([1, 131072])
|
20259 |
+
batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
|
20260 |
+
batch tensor: position_ids torch.Size([1, 131072])
|
20261 |
+
batch tensor after cp: tokens torch.Size([1, 131072])
|
20262 |
+
batch tensor after cp: labels torch.Size([1, 131072])
|
20263 |
+
batch tensor after cp: loss_mask torch.Size([1, 131072])
|
20264 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
|
20265 |
+
batch tensor after cp: position_ids torch.Size([1, 131072])
|
20266 |
+
batch tensor: tokens torch.Size([1, 131072])
|
20267 |
+
batch tensor: labels torch.Size([1, 131072])
|
20268 |
+
batch tensor: loss_mask torch.Size([1, 131072])
|
20269 |
+
batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
|
20270 |
+
batch tensor: position_ids torch.Size([1, 131072])
|
20271 |
+
batch tensor after cp: tokens torch.Size([1, 131072])
|
20272 |
+
batch tensor after cp: labels torch.Size([1, 131072])
|
20273 |
+
batch tensor after cp: loss_mask torch.Size([1, 131072])
|
20274 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
|
20275 |
+
batch tensor after cp: position_ids torch.Size([1, 131072])
|
20276 |
+
batch tensor: tokens torch.Size([1, 131072])
|
20277 |
+
batch tensor: labels torch.Size([1, 131072])
|
20278 |
+
batch tensor: loss_mask torch.Size([1, 131072])
|
20279 |
+
batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
|
20280 |
+
batch tensor: position_ids torch.Size([1, 131072])
|
20281 |
+
batch tensor after cp: tokens torch.Size([1, 131072])
|
20282 |
+
batch tensor after cp: labels torch.Size([1, 131072])
|
20283 |
+
batch tensor after cp: loss_mask torch.Size([1, 131072])
|
20284 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
|
20285 |
+
batch tensor after cp: position_ids torch.Size([1, 131072])
|
20286 |
+
batch tensor: tokens torch.Size([1, 131072])
|
20287 |
+
batch tensor: labels torch.Size([1, 131072])
|
20288 |
+
batch tensor: loss_mask torch.Size([1, 131072])
|
20289 |
+
batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
|
20290 |
+
batch tensor: position_ids torch.Size([1, 131072])
|
20291 |
+
batch tensor after cp: tokens torch.Size([1, 131072])
|
20292 |
+
batch tensor after cp: labels torch.Size([1, 131072])
|
20293 |
+
batch tensor after cp: loss_mask torch.Size([1, 131072])
|
20294 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
|
20295 |
+
batch tensor after cp: position_ids torch.Size([1, 131072])
|
20296 |
+
batch tensor: tokens torch.Size([1, 131072])
|
20297 |
+
batch tensor: labels torch.Size([1, 131072])
|
20298 |
+
batch tensor: loss_mask torch.Size([1, 131072])
|
20299 |
+
batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
|
20300 |
+
batch tensor: position_ids torch.Size([1, 131072])
|
20301 |
+
batch tensor after cp: tokens torch.Size([1, 131072])
|
20302 |
+
batch tensor after cp: labels torch.Size([1, 131072])
|
20303 |
+
batch tensor after cp: loss_mask torch.Size([1, 131072])
|
20304 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
|
20305 |
+
batch tensor after cp: position_ids torch.Size([1, 131072])
|
20306 |
+
batch tensor: tokens torch.Size([1, 131072])
|
20307 |
+
batch tensor: labels torch.Size([1, 131072])
|
20308 |
+
batch tensor: loss_mask torch.Size([1, 131072])
|
20309 |
+
batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
|
20310 |
+
batch tensor: position_ids torch.Size([1, 131072])
|
20311 |
+
batch tensor after cp: tokens torch.Size([1, 131072])
|
20312 |
+
batch tensor after cp: labels torch.Size([1, 131072])
|
20313 |
+
batch tensor after cp: loss_mask torch.Size([1, 131072])
|
20314 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
|
20315 |
+
batch tensor after cp: position_ids torch.Size([1, 131072])
|
20316 |
+
batch tensor: tokens torch.Size([1, 131072])
|
20317 |
+
batch tensor: labels torch.Size([1, 131072])
|
20318 |
+
batch tensor: loss_mask torch.Size([1, 131072])
|
20319 |
+
batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
|
20320 |
+
batch tensor: position_ids torch.Size([1, 131072])
|
20321 |
+
batch tensor after cp: tokens torch.Size([1, 131072])
|
20322 |
+
batch tensor after cp: labels torch.Size([1, 131072])
|
20323 |
+
batch tensor after cp: loss_mask torch.Size([1, 131072])
|
20324 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
|
20325 |
+
batch tensor after cp: position_ids torch.Size([1, 131072])
|
20326 |
+
Start exporting trace 10
|
20327 |
+
Done exporting trace 10
|
20328 |
+
(min, max) time across ranks (ms):
|
20329 |
+
evaluate .......................................: (42509.74, 42510.08)
|
20330 |
+
----------------------------------------------------------------------------------------------------------------
|
20331 |
+
validation loss at iteration 10 on validation set | lm loss value: 1.151343E+01 | lm loss PPL: 1.000505E+05 |
|
20332 |
+
----------------------------------------------------------------------------------------------------------------
|
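A quick cross-check of the two numbers reported in the box above: the printed PPL is consistent with simply being exp of the printed lm loss. A minimal sketch of that check in plain Python (this is an assumption about how the PPL column is derived, not Megatron code):

```python
import math

lm_loss = 1.151343e1        # "lm loss value" from the validation line above
ppl = math.exp(lm_loss)     # perplexity as exp(cross-entropy), the usual definition
print(f"{ppl:.6E}")         # ~1.000505E+05, matching the reported "lm loss PPL"
```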
20333 |
+
WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED
|
20334 |
+
WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED
|
20335 |
+
WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED
|
20336 |
+
Evaluating on 1 samples
|
20337 |
+
Evaluating iter 1/1
|
20338 |
+
batch tensor: tokens torch.Size([1, 131072])
|
20339 |
+
batch tensor: labels torch.Size([1, 131072])
|
20340 |
+
batch tensor: loss_mask torch.Size([1, 131072])
|
20341 |
+
batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
|
20342 |
+
batch tensor: position_ids torch.Size([1, 131072])
|
20343 |
+
batch tensor after cp: tokens torch.Size([1, 131072])
|
20344 |
+
batch tensor after cp: labels torch.Size([1, 131072])
|
20345 |
+
batch tensor after cp: loss_mask torch.Size([1, 131072])
|
20346 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
|
20347 |
+
batch tensor after cp: position_ids torch.Size([1, 131072])
|
20348 |
+
batch tensor: tokens torch.Size([1, 131072])
|
20349 |
+
batch tensor: labels torch.Size([1, 131072])
|
20350 |
+
batch tensor: loss_mask torch.Size([1, 131072])
|
20351 |
+
batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
|
20352 |
+
batch tensor: position_ids torch.Size([1, 131072])
|
20353 |
+
batch tensor after cp: tokens torch.Size([1, 131072])
|
20354 |
+
batch tensor after cp: labels torch.Size([1, 131072])
|
20355 |
+
batch tensor after cp: loss_mask torch.Size([1, 131072])
|
20356 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
|
20357 |
+
batch tensor after cp: position_ids torch.Size([1, 131072])
|
20358 |
+
batch tensor: tokens torch.Size([1, 131072])
|
20359 |
+
batch tensor: labels torch.Size([1, 131072])
|
20360 |
+
batch tensor: loss_mask torch.Size([1, 131072])
|
20361 |
+
batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
|
20362 |
+
batch tensor: position_ids torch.Size([1, 131072])
|
20363 |
+
batch tensor after cp: tokens torch.Size([1, 131072])
|
20364 |
+
batch tensor after cp: labels torch.Size([1, 131072])
|
20365 |
+
batch tensor after cp: loss_mask torch.Size([1, 131072])
|
20366 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
|
20367 |
+
batch tensor after cp: position_ids torch.Size([1, 131072])
|
20368 |
+
batch tensor: tokens torch.Size([1, 131072])
|
20369 |
+
batch tensor: labels torch.Size([1, 131072])
|
20370 |
+
batch tensor: loss_mask torch.Size([1, 131072])
|
20371 |
+
batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
|
20372 |
+
batch tensor: position_ids torch.Size([1, 131072])
|
20373 |
+
batch tensor after cp: tokens torch.Size([1, 131072])
|
20374 |
+
batch tensor after cp: labels torch.Size([1, 131072])
|
20375 |
+
batch tensor after cp: loss_mask torch.Size([1, 131072])
|
20376 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
|
20377 |
+
batch tensor after cp: position_ids torch.Size([1, 131072])
|
20378 |
+
batch tensor: tokens torch.Size([1, 131072])
|
20379 |
+
batch tensor: labels torch.Size([1, 131072])
|
20380 |
+
batch tensor: loss_mask torch.Size([1, 131072])
|
20381 |
+
batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
|
20382 |
+
batch tensor: position_ids torch.Size([1, 131072])
|
20383 |
+
batch tensor after cp: tokens torch.Size([1, 131072])
|
20384 |
+
batch tensor after cp: labels torch.Size([1, 131072])
|
20385 |
+
batch tensor after cp: loss_mask torch.Size([1, 131072])
|
20386 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
|
20387 |
+
batch tensor after cp: position_ids torch.Size([1, 131072])
|
20388 |
+
batch tensor: tokens torch.Size([1, 131072])
|
20389 |
+
batch tensor: labels torch.Size([1, 131072])
|
20390 |
+
batch tensor: loss_mask torch.Size([1, 131072])
|
20391 |
+
batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
|
20392 |
+
batch tensor: position_ids torch.Size([1, 131072])
|
20393 |
+
batch tensor after cp: tokens torch.Size([1, 131072])
|
20394 |
+
batch tensor after cp: labels torch.Size([1, 131072])
|
20395 |
+
batch tensor after cp: loss_mask torch.Size([1, 131072])
|
20396 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
|
20397 |
+
batch tensor after cp: position_ids torch.Size([1, 131072])
|
20398 |
+
batch tensor: tokens torch.Size([1, 131072])
|
20399 |
+
batch tensor: labels torch.Size([1, 131072])
|
20400 |
+
batch tensor: loss_mask torch.Size([1, 131072])
|
20401 |
+
batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
|
20402 |
+
batch tensor: position_ids torch.Size([1, 131072])
|
20403 |
+
batch tensor after cp: tokens torch.Size([1, 131072])
|
20404 |
+
batch tensor after cp: labels torch.Size([1, 131072])
|
20405 |
+
batch tensor after cp: loss_mask torch.Size([1, 131072])
|
20406 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
|
20407 |
+
batch tensor after cp: position_ids torch.Size([1, 131072])
|
20408 |
+
batch tensor: tokens torch.Size([1, 131072])
|
20409 |
+
batch tensor: labels torch.Size([1, 131072])
|
20410 |
+
batch tensor: loss_mask torch.Size([1, 131072])
|
20411 |
+
batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
|
20412 |
+
batch tensor: position_ids torch.Size([1, 131072])
|
20413 |
+
batch tensor after cp: tokens torch.Size([1, 131072])
|
20414 |
+
batch tensor after cp: labels torch.Size([1, 131072])
|
20415 |
+
batch tensor after cp: loss_mask torch.Size([1, 131072])
|
20416 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
|
20417 |
+
batch tensor after cp: position_ids torch.Size([1, 131072])
|
20418 |
+
Start exporting trace 11
|
20419 |
+
Done exporting trace 11
|
20420 |
+
(min, max) time across ranks (ms):
|
20421 |
+
evaluate .......................................: (40780.29, 40780.77)
|
20422 |
+
----------------------------------------------------------------------------------------------------------
|
20423 |
+
validation loss at iteration 10 on test set | lm loss value: 1.151343E+01 | lm loss PPL: 1.000505E+05 |
|
20424 |
+
----------------------------------------------------------------------------------------------------------
|
20425 |
+
WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED
|
20426 |
+
WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED
|
attnserver.run_attnserver.slurm.sh.343214.err.log
CHANGED
The diff for this file is too large to render. See raw diff.
attnserver.run_attnserver.slurm.sh.343214.out.log
CHANGED
@@ -42908,3 +42908,258 @@ WARNING: constraints for invoking optimized fused softmax kernel are not met. We
|
|
42908 |
time to initialize megatron (seconds): 14.391
|
42909 |
[after megatron is initialized] datetime: 2025-06-21 22:08:43
|
42910 |
building GPT model ...
|
42911 |
+
>>> embedding
|
42912 |
+
>>> decoder
|
42913 |
+
>>> output_layer
|
42914 |
+
> number of parameters on (tensor, pipeline) model parallel rank (0, 0): 676924416
|
42915 |
+
>>> embedding
|
42916 |
+
>>> decoder
|
42917 |
+
>>> output_layer
|
42918 |
+
> number of parameters on (tensor, pipeline) model parallel rank (1, 0): 676924416
|
42919 |
+
>>> embedding
|
42920 |
+
>>> decoder
|
42921 |
+
>>> output_layer
|
42922 |
+
> number of parameters on (tensor, pipeline) model parallel rank (3, 0): 676924416
|
42923 |
+
>>> embedding
|
42924 |
+
>>> decoder
|
42925 |
+
>>> output_layer
|
42926 |
+
> number of parameters on (tensor, pipeline) model parallel rank (0, 0): 676924416
|
42927 |
+
>>> embedding
|
42928 |
+
>>> decoder
|
42929 |
+
>>> output_layer
|
42930 |
+
> number of parameters on (tensor, pipeline) model parallel rank (2, 0): 676924416
|
42931 |
+
>>> embedding
|
42932 |
+
>>> decoder
|
42933 |
+
>>> output_layer
|
42934 |
+
> number of parameters on (tensor, pipeline) model parallel rank (0, 0): 676924416
|
42935 |
+
>>> embedding
|
42936 |
+
>>> decoder
|
42937 |
+
>>> output_layer
|
42938 |
+
> number of parameters on (tensor, pipeline) model parallel rank (1, 0): 676924416
|
42939 |
+
>>> embedding
|
42940 |
+
>>> decoder
|
42941 |
+
>>> output_layer
|
42942 |
+
> number of parameters on (tensor, pipeline) model parallel rank (3, 0): 676924416
|
42943 |
+
>>> embedding
|
42944 |
+
>>> decoder
|
42945 |
+
>>> output_layer
|
42946 |
+
> number of parameters on (tensor, pipeline) model parallel rank (2, 0): 676924416
|
42947 |
+
>>> embedding
|
42948 |
+
>>> decoder
|
42949 |
+
>>> output_layer
|
42950 |
+
> number of parameters on (tensor, pipeline) model parallel rank (2, 0): 676924416
|
42951 |
+
>>> embedding
|
42952 |
+
>>> decoder
|
42953 |
+
>>> output_layer
|
42954 |
+
> number of parameters on (tensor, pipeline) model parallel rank (1, 0): 676924416
|
42955 |
+
>>> embedding
|
42956 |
+
>>> decoder
|
42957 |
+
>>> output_layer
|
42958 |
+
> number of parameters on (tensor, pipeline) model parallel rank (1, 0): 676924416
|
42959 |
+
>>> embedding
|
42960 |
+
>>> decoder
|
42961 |
+
>>> output_layer
|
42962 |
+
> number of parameters on (tensor, pipeline) model parallel rank (3, 0): 676924416
|
42963 |
+
>>> embedding
|
42964 |
+
>>> decoder
|
42965 |
+
>>> output_layer
|
42966 |
+
> number of parameters on (tensor, pipeline) model parallel rank (3, 0): 676924416
|
42967 |
+
>>> embedding
|
42968 |
+
>>> decoder
|
42969 |
+
>>> output_layer
|
42970 |
+
> number of parameters on (tensor, pipeline) model parallel rank (2, 0): 676924416
|
42971 |
+
>>> embedding
|
42972 |
+
>>> decoder
|
42973 |
+
>>> output_layer
|
42974 |
+
> number of parameters on (tensor, pipeline) model parallel rank (1, 0): 676924416
|
42975 |
+
>>> embedding
|
42976 |
+
>>> decoder
|
42977 |
+
>>> output_layer
|
42978 |
+
> number of parameters on (tensor, pipeline) model parallel rank (3, 0): 676924416
|
42979 |
+
>>> embedding
|
42980 |
+
>>> decoder
|
42981 |
+
>>> output_layer
|
42982 |
+
> number of parameters on (tensor, pipeline) model parallel rank (0, 0): 676924416
|
42983 |
+
>>> embedding
|
42984 |
+
>>> decoder
|
42985 |
+
>>> output_layer
|
42986 |
+
> number of parameters on (tensor, pipeline) model parallel rank (0, 0): 676924416
|
42987 |
+
>>> embedding
|
42988 |
+
>>> decoder
|
42989 |
+
>>> output_layer
|
42990 |
+
> number of parameters on (tensor, pipeline) model parallel rank (3, 0): 676924416
|
42991 |
+
>>> embedding
|
42992 |
+
>>> decoder
|
42993 |
+
>>> output_layer
|
42994 |
+
> number of parameters on (tensor, pipeline) model parallel rank (0, 0): 676924416
|
42995 |
+
>>> embedding
|
42996 |
+
>>> decoder
|
42997 |
+
>>> output_layer
|
42998 |
+
> number of parameters on (tensor, pipeline) model parallel rank (1, 0): 676924416
|
42999 |
+
>>> embedding
|
43000 |
+
>>> decoder
|
43001 |
+
>>> output_layer
|
43002 |
+
> number of parameters on (tensor, pipeline) model parallel rank (2, 0): 676924416
|
43003 |
+
>>> embedding
|
43004 |
+
>>> decoder
|
43005 |
+
>>> output_layer
|
43006 |
+
> number of parameters on (tensor, pipeline) model parallel rank (3, 0): 676924416
|
43007 |
+
>>> embedding
|
43008 |
+
>>> decoder
|
43009 |
+
>>> output_layer
|
43010 |
+
> number of parameters on (tensor, pipeline) model parallel rank (1, 0): 676924416
|
43011 |
+
>>> embedding
|
43012 |
+
>>> decoder
|
43013 |
+
>>> output_layer
|
43014 |
+
> number of parameters on (tensor, pipeline) model parallel rank (3, 0): 676924416
|
43015 |
+
>>> embedding
|
43016 |
+
>>> decoder
|
43017 |
+
>>> output_layer
|
43018 |
+
> number of parameters on (tensor, pipeline) model parallel rank (2, 0): 676924416
|
43019 |
+
>>> embedding
|
43020 |
+
>>> decoder
|
43021 |
+
>>> output_layer
|
43022 |
+
> number of parameters on (tensor, pipeline) model parallel rank (0, 0): 676924416
|
43023 |
+
>>> embedding
|
43024 |
+
>>> decoder
|
43025 |
+
>>> output_layer
|
43026 |
+
> number of parameters on (tensor, pipeline) model parallel rank (2, 0): 676924416
|
43027 |
+
>>> embedding
|
43028 |
+
>>> decoder
|
43029 |
+
>>> output_layer
|
43030 |
+
> number of parameters on (tensor, pipeline) model parallel rank (1, 0): 676924416
|
43031 |
+
>>> embedding
|
43032 |
+
>>> decoder
|
43033 |
+
>>> output_layer
|
43034 |
+
> number of parameters on (tensor, pipeline) model parallel rank (2, 0): 676924416
|
43035 |
+
>>> embedding
|
43036 |
+
>>> decoder
|
43037 |
+
>>> output_layer
|
43038 |
+
> number of parameters on (tensor, pipeline) model parallel rank (0, 0): 676924416
|
43039 |
+
INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False)
|
43040 |
+
INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1
|
43041 |
+
Params for bucket 1 (676924416 elements, 676924416 padded size):
|
43042 |
+
module.decoder.final_layernorm.bias
|
43043 |
+
module.decoder.layers.1.mlp.linear_fc2.weight
|
43044 |
+
module.decoder.layers.1.self_attention.linear_proj.bias
|
43045 |
+
module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight
|
43046 |
+
module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias
|
43047 |
+
module.decoder.layers.0.mlp.linear_fc2.weight
|
43048 |
+
module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias
|
43049 |
+
module.decoder.final_layernorm.weight
|
43050 |
+
module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight
|
43051 |
+
module.decoder.layers.1.self_attention.linear_qkv.bias
|
43052 |
+
module.decoder.layers.0.mlp.linear_fc2.bias
|
43053 |
+
module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight
|
43054 |
+
module.decoder.layers.1.mlp.linear_fc1.weight
|
43055 |
+
module.decoder.layers.0.mlp.linear_fc1.weight
|
43056 |
+
module.decoder.layers.0.self_attention.linear_proj.weight
|
43057 |
+
module.embedding.word_embeddings.weight
|
43058 |
+
module.decoder.layers.1.mlp.linear_fc2.bias
|
43059 |
+
module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight
|
43060 |
+
module.decoder.layers.0.self_attention.linear_proj.bias
|
43061 |
+
module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias
|
43062 |
+
module.decoder.layers.0.self_attention.linear_qkv.weight
|
43063 |
+
module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias
|
43064 |
+
module.embedding.position_embeddings.weight
|
43065 |
+
module.decoder.layers.1.mlp.linear_fc1.bias
|
43066 |
+
module.decoder.layers.0.mlp.linear_fc1.bias
|
43067 |
+
module.decoder.layers.1.self_attention.linear_qkv.weight
|
43068 |
+
module.decoder.layers.1.self_attention.linear_proj.weight
|
43069 |
+
module.decoder.layers.0.self_attention.linear_qkv.bias
|
43070 |
+
INFO:megatron.core.optimizer:Setting up optimizer with config OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=<megatron.core.timers.Timers object at 0x14df3ebb2360>, config_logger_dir='')
|
43071 |
+
INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine
|
43072 |
+
WARNING: could not find the metadata file gpt-checkpoint/latest_checkpointed_iteration.txt
|
43073 |
+
will not load any checkpoints and will start from random
|
43074 |
+
(min, max) time across ranks (ms):
|
43075 |
+
load-checkpoint ................................: (2.39, 5.10)
|
43076 |
+
[after model, optimizer, and learning rate scheduler are built] datetime: 2025-06-21 22:08:51
|
43077 |
+
> building train, validation, and test datasets ...
|
43078 |
+
> datasets target sizes (minimum size):
|
43079 |
+
train: 10
|
43080 |
+
validation: 1
|
43081 |
+
test: 1
|
43082 |
+
INFO:megatron.core.datasets.blended_megatron_dataset_config:Let mock = True, as both blend and blend_per_split are None
|
43083 |
+
INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split = 1,1,1, an arbitrarily even split, as mock is True
|
43084 |
+
INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split_matrix = [(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)]
|
43085 |
+
> building train, validation, and test datasets for GPT ...
|
43086 |
+
INFO:megatron.core.datasets.blended_megatron_dataset_builder:Building MockGPTDataset splits with sizes=(10, 1, 1) and config=GPTDatasetConfig(random_seed=1234, sequence_length=131072, blend=None, blend_per_split=None, split='1,1,1', split_matrix=[(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)], num_dataset_builder_threads=1, path_to_cache=None, mmap_bin_files=True, mock=True, tokenizer=<megatron.training.tokenizer.tokenizer._GPT2BPETokenizer object at 0x14df47ebf260>, mid_level_dataset_surplus=0.005, reset_position_ids=False, reset_attention_mask=False, eod_mask_loss=False, create_attention_mask=True, drop_last_partial_validation_sequence=True, add_extra_token_to_sequence=True, object_storage_cache_path=None)
|
43087 |
+
INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset train indices
|
43088 |
+
DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False
|
43089 |
+
WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None
|
43090 |
+
DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.007706 seconds
|
43091 |
+
INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 520
|
43092 |
+
INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1
|
43093 |
+
INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset valid indices
|
43094 |
+
DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False
|
43095 |
+
WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None
|
43096 |
+
DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.001555 seconds
|
43097 |
+
INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 520
|
43098 |
+
INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1
|
43099 |
+
INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset test indices
|
43100 |
+
DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False
|
43101 |
+
WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None
|
43102 |
+
DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.001344 seconds
|
43103 |
+
INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 520
|
43104 |
+
INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1
|
43105 |
+
> finished creating GPT datasets ...
|
43106 |
+
[after dataloaders are built] datetime: 2025-06-21 22:08:51
|
43107 |
+
done with setup ...
|
43108 |
+
training ...
|
43109 |
+
(min, max) time across ranks (ms):
|
43110 |
+
model-and-optimizer-setup ......................: (7342.39, 7383.37)
|
43111 |
+
train/valid/test-data-iterators-setup ..........: (24.38, 172.83)
|
43112 |
+
Setting rerun_state_machine.current_iteration to 0...
|
43113 |
+
[before the start of training step] datetime: 2025-06-21 22:08:51
|
43114 |
+
WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 5.01 GiB is free. Including non-PyTorch memory, this process has 134.80 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
|
43115 |
+
['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 238, in setup_batches\n attention_mask[batch_id, :, :token_len, :token_len] = torch.tril(\n ~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 5.01 GiB is free. Including non-PyTorch memory, this process has 134.80 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
|
43116 |
+
WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 5.02 GiB is free. Including non-PyTorch memory, this process has 134.78 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
|
43117 |
+
['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 238, in setup_batches\n attention_mask[batch_id, :, :token_len, :token_len] = torch.tril(\n ~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 5.02 GiB is free. Including non-PyTorch memory, this process has 134.78 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
|
43118 |
+
WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 5.04 GiB is free. Including non-PyTorch memory, this process has 134.76 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
|
43119 |
+
['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 238, in setup_batches\n attention_mask[batch_id, :, :token_len, :token_len] = torch.tril(\n ~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 5.04 GiB is free. Including non-PyTorch memory, this process has 134.76 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
|
43120 |
+
WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 5.04 GiB is free. Including non-PyTorch memory, this process has 134.76 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
|
43121 |
+
['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 238, in setup_batches\n attention_mask[batch_id, :, :token_len, :token_len] = torch.tril(\n ~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 5.04 GiB is free. Including non-PyTorch memory, this process has 134.76 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
|
43122 |
+
WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 5.03 GiB is free. Including non-PyTorch memory, this process has 134.78 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
|
43123 |
+
['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 238, in setup_batches\n attention_mask[batch_id, :, :token_len, :token_len] = torch.tril(\n ~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 5.03 GiB is free. Including non-PyTorch memory, this process has 134.78 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
|
43124 |
+
WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 5.04 GiB is free. Including non-PyTorch memory, this process has 134.76 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
|
43125 |
+
['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 238, in setup_batches\n attention_mask[batch_id, :, :token_len, :token_len] = torch.tril(\n ~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 5.04 GiB is free. Including non-PyTorch memory, this process has 134.76 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
|
43126 |
+
WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 5.03 GiB is free. Including non-PyTorch memory, this process has 134.78 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
|
43127 |
+
['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 238, in setup_batches\n attention_mask[batch_id, :, :token_len, :token_len] = torch.tril(\n ~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 5.03 GiB is free. Including non-PyTorch memory, this process has 134.78 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
|
43128 |
+
WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 5.03 GiB is free. Including non-PyTorch memory, this process has 134.78 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
|
43129 |
+
['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 238, in setup_batches\n attention_mask[batch_id, :, :token_len, :token_len] = torch.tril(\n ~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 5.03 GiB is free. Including non-PyTorch memory, this process has 134.78 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
|
43130 |
+
WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 5.03 GiB is free. Including non-PyTorch memory, this process has 134.78 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
|
43131 |
+
['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 238, in setup_batches\n attention_mask[batch_id, :, :token_len, :token_len] = torch.tril(\n ~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 5.03 GiB is free. Including non-PyTorch memory, this process has 134.78 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
|
43132 |
+
WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 5.04 GiB is free. Including non-PyTorch memory, this process has 134.76 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
|
43133 |
+
['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 238, in setup_batches\n attention_mask[batch_id, :, :token_len, :token_len] = torch.tril(\n ~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 5.04 GiB is free. Including non-PyTorch memory, this process has 134.76 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
|
43134 |
+
WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 5.04 GiB is free. Including non-PyTorch memory, this process has 134.76 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
|
43135 |
+
['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 238, in setup_batches\n attention_mask[batch_id, :, :token_len, :token_len] = torch.tril(\n ~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 5.04 GiB is free. Including non-PyTorch memory, this process has 134.76 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
|
43136 |
+
WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 5.02 GiB is free. Including non-PyTorch memory, this process has 134.78 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
|
43137 |
+
['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 238, in setup_batches\n attention_mask[batch_id, :, :token_len, :token_len] = torch.tril(\n ~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 5.02 GiB is free. Including non-PyTorch memory, this process has 134.78 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
|
43138 |
+
WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 5.02 GiB is free. Including non-PyTorch memory, this process has 134.78 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
|
43139 |
+
['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 238, in setup_batches\n attention_mask[batch_id, :, :token_len, :token_len] = torch.tril(\n ~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 5.02 GiB is free. Including non-PyTorch memory, this process has 134.78 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
|
43140 |
+
WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 5.04 GiB is free. Including non-PyTorch memory, this process has 134.76 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
|
43141 |
+
['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 238, in setup_batches\n attention_mask[batch_id, :, :token_len, :token_len] = torch.tril(\n ~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 5.04 GiB is free. Including non-PyTorch memory, this process has 134.76 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
|
43142 |
+
WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 5.02 GiB is free. Including non-PyTorch memory, this process has 134.78 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
|
43143 |
+
['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 238, in setup_batches\n attention_mask[batch_id, :, :token_len, :token_len] = torch.tril(\n ~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 5.02 GiB is free. Including non-PyTorch memory, this process has 134.78 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
|
43144 |
+
WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 5.04 GiB is free. Including non-PyTorch memory, this process has 134.76 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
|
43145 |
+
['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 238, in setup_batches\n attention_mask[batch_id, :, :token_len, :token_len] = torch.tril(\n ~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 5.04 GiB is free. Including non-PyTorch memory, this process has 134.76 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 5.03 GiB is free. Including non-PyTorch memory, this process has 134.78 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 238, in setup_batches\n attention_mask[batch_id, :, :token_len, :token_len] = torch.tril(\n ~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 5.03 GiB is free. Including non-PyTorch memory, this process has 134.78 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 5.03 GiB is free. Including non-PyTorch memory, this process has 134.78 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 238, in setup_batches\n attention_mask[batch_id, :, :token_len, :token_len] = torch.tril(\n ~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 5.03 GiB is free. Including non-PyTorch memory, this process has 134.78 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 5.01 GiB is free. Including non-PyTorch memory, this process has 134.80 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 238, in setup_batches\n attention_mask[batch_id, :, :token_len, :token_len] = torch.tril(\n ~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 5.01 GiB is free. Including non-PyTorch memory, this process has 134.80 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 5.03 GiB is free. Including non-PyTorch memory, this process has 134.78 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 238, in setup_batches\n attention_mask[batch_id, :, :token_len, :token_len] = torch.tril(\n ~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 5.03 GiB is free. Including non-PyTorch memory, this process has 134.78 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 5.03 GiB is free. Including non-PyTorch memory, this process has 134.78 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 238, in setup_batches\n attention_mask[batch_id, :, :token_len, :token_len] = torch.tril(\n ~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 5.03 GiB is free. Including non-PyTorch memory, this process has 134.78 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 5.01 GiB is free. Including non-PyTorch memory, this process has 134.80 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 238, in setup_batches\n attention_mask[batch_id, :, :token_len, :token_len] = torch.tril(\n ~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 5.01 GiB is free. Including non-PyTorch memory, this process has 134.80 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 5.02 GiB is free. Including non-PyTorch memory, this process has 134.78 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 238, in setup_batches\n attention_mask[batch_id, :, :token_len, :token_len] = torch.tril(\n ~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 5.02 GiB is free. Including non-PyTorch memory, this process has 134.78 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 5.01 GiB is free. Including non-PyTorch memory, this process has 134.80 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 238, in setup_batches\n attention_mask[batch_id, :, :token_len, :token_len] = torch.tril(\n ~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 5.01 GiB is free. Including non-PyTorch memory, this process has 134.80 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 5.04 GiB is free. Including non-PyTorch memory, this process has 134.76 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 238, in setup_batches\n attention_mask[batch_id, :, :token_len, :token_len] = torch.tril(\n ~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 5.04 GiB is free. Including non-PyTorch memory, this process has 134.76 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 5.01 GiB is free. Including non-PyTorch memory, this process has 134.80 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 238, in setup_batches\n attention_mask[batch_id, :, :token_len, :token_len] = torch.tril(\n ~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 5.01 GiB is free. Including non-PyTorch memory, this process has 134.80 GiB memory in use. Of the allocated memory 133.06 GiB is allocated by PyTorch, and 210.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
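Every rank fails at the same point: materializing a block of the causal attention mask with torch.tril inside setup_batches. The failed 16.00 GiB request is consistent with a boolean [token_len, token_len] block where token_len = 131072 (131072 * 131072 bytes is exactly 16 GiB); the log never prints token_len, so that figure is an inference, not something the trace states. A minimal sketch of the arithmetic, plus the allocator setting the error message itself recommends, assuming it is exported before the job allocates any CUDA memory:

```python
import os

# Size in bytes of a boolean [token_len, token_len] mask block, which is what
# torch.tril is asked to produce in setup_batches (1 byte per bool element).
def mask_bytes(token_len: int) -> int:
    return token_len * token_len

# 131072 is an assumption inferred from the "Tried to allocate 16.00 GiB"
# message; the actual token_len is not printed in this log.
print(mask_bytes(131072) / 2**30)  # 16.0

# Mitigation suggested by the error text: let the caching allocator use
# expandable segments to reduce fragmentation. It has to be set before the
# process touches the GPU, e.g. in the SLURM script that launches
# pretrain_gpt_profile.py, not after the fact.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
```

Whether expandable segments would actually help here is uncertain: with only about 5 GiB free against a 16 GiB request, the more direct fix is to avoid materializing a dense [token_len, token_len] mask in the first place.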
attnserver.run_attnserver.slurm.sh.343215.err.log CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:
- size
+ oid sha256:f64b8b1e0289e3f82b6ca9559fa8e0d3dd9e6cc8198f74ef81e1ecf64814e907
+ size 60421789
attnserver.run_attnserver.slurm.sh.343215.out.log CHANGED
The diff for this file is too large to render. See raw diff.
attnserver.run_attnserver.slurm.sh.343216.err.log ADDED
The diff for this file is too large to render. See raw diff.
attnserver.run_attnserver.slurm.sh.343216.out.log ADDED
The diff for this file is too large to render. See raw diff.
attnserver.run_attnserver.slurm.sh.343226.out.log CHANGED
@@ -19193,3 +19193,594 @@ batch tensor after cp: labels torch.Size([2, 81920])
  batch tensor after cp: loss_mask torch.Size([2, 81920])
  batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
  batch tensor after cp: position_ids torch.Size([2, 81920])
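The hunk context above already shows the shape pattern that repeats throughout this log: each tensor enters with a 163840-token sequence ("batch tensor") and leaves batch preparation with half of it ("batch tensor after cp"), i.e. context parallelism with CP size 2, while the attention mask keeps its full key dimension. A minimal sketch of that kind of split, assuming a plain contiguous chunk per rank rather than the exact chunking Megatron-LM uses:

```python
import torch

def split_for_context_parallel(batch, cp_size, cp_rank):
    """Keep 1/cp_size of the sequence dimension on one context-parallel rank.

    Simplified contiguous split for illustration only; Megatron-LM's actual
    context-parallel chunking differs.
    """
    out = {}
    for name, t in batch.items():
        if name == "attention_mask":
            # Query rows follow the local tokens, key columns keep the full
            # sequence, matching the [2, 1, 81920, 163840] shape in the log.
            chunk = t.shape[2] // cp_size
            out[name] = t[:, :, cp_rank * chunk:(cp_rank + 1) * chunk, :]
        else:
            chunk = t.shape[1] // cp_size
            out[name] = t[:, cp_rank * chunk:(cp_rank + 1) * chunk]
    return out

# Tiny stand-in for the logged shapes (163840 -> 81920 with cp_size=2).
seq = 16
batch = {
    "tokens": torch.zeros(2, seq, dtype=torch.long),
    "attention_mask": torch.ones(2, 1, seq, seq, dtype=torch.bool),
    "position_ids": torch.arange(seq).repeat(2, 1),
}
local = split_for_context_parallel(batch, cp_size=2, cp_rank=0)
print(local["tokens"].shape)          # torch.Size([2, 8])
print(local["attention_mask"].shape)  # torch.Size([2, 1, 8, 16])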
+ batch tensor: tokens torch.Size([2, 163840])
+ batch tensor: labels torch.Size([2, 163840])
+ batch tensor: loss_mask torch.Size([2, 163840])
+ batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
+ batch tensor: position_ids torch.Size([2, 163840])
+ batch tensor after cp: tokens torch.Size([2, 81920])
+ batch tensor after cp: labels torch.Size([2, 81920])
+ batch tensor after cp: loss_mask torch.Size([2, 81920])
+ batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
+ batch tensor after cp: position_ids torch.Size([2, 81920])
+ batch tensor: tokens torch.Size([2, 163840])
+ batch tensor: labels torch.Size([2, 163840])
+ batch tensor: loss_mask torch.Size([2, 163840])
+ batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
+ batch tensor: position_ids torch.Size([2, 163840])
+ batch tensor after cp: tokens torch.Size([2, 81920])
+ batch tensor after cp: labels torch.Size([2, 81920])
+ batch tensor after cp: loss_mask torch.Size([2, 81920])
+ batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
+ batch tensor after cp: position_ids torch.Size([2, 81920])
+ Start exporting trace 2
+ Done exporting trace 2
+ [2025-06-21 22:09:31] iteration 3/ 10 | consumed samples: 3 | elapsed time per iteration (ms): 87543.9 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 1073741824.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
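In the iteration summaries, the loss scale halves on every step (1073741824.0, then 536870912.0, 268435456.0, and so on) and each iteration is counted as skipped, which is the usual signature of dynamic loss scaling backing off after non-finite gradients. A toy version of that backoff logic, assuming the conventional halve-on-overflow, grow-after-a-stable-window scheme rather than Megatron-LM's exact implementation:

```python
import torch

class DynamicLossScaler:
    """Toy dynamic loss scaler: halve on overflow, grow after a stable window.

    Illustrative only; Megatron-LM handles this inside its mixed-precision
    optimizer and its constants and details differ.
    """
    def __init__(self, init_scale=2.0**30, growth_factor=2.0,
                 backoff_factor=0.5, growth_interval=1000):
        self.scale = init_scale
        self.growth_factor = growth_factor
        self.backoff_factor = backoff_factor
        self.growth_interval = growth_interval
        self._good_steps = 0

    def update(self, grads):
        # Skip the step and shrink the scale if any gradient is inf/nan.
        found_inf = any(not torch.isfinite(g).all() for g in grads)
        if found_inf:
            self.scale *= self.backoff_factor
            self._good_steps = 0
            return False  # caller should skip the optimizer step
        self._good_steps += 1
        if self._good_steps % self.growth_interval == 0:
            self.scale *= self.growth_factor
        return True

scaler = DynamicLossScaler()            # 2**30 matches the first logged scale
grads = [torch.tensor([float("inf")])]  # pretend the backward pass overflowed
stepped = scaler.update(grads)
print(stepped, scaler.scale)            # False 536870912.0
```

With every step skipped and a learning rate of 0.000000E+00, these iterations never update the model, which is consistent with a profiling run rather than real optimization.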
19219 |
+
batch tensor: tokens torch.Size([2, 163840])
|
19220 |
+
batch tensor: labels torch.Size([2, 163840])
|
19221 |
+
batch tensor: loss_mask torch.Size([2, 163840])
|
19222 |
+
batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
|
19223 |
+
batch tensor: position_ids torch.Size([2, 163840])
|
19224 |
+
batch tensor after cp: tokens torch.Size([2, 81920])
|
19225 |
+
batch tensor after cp: labels torch.Size([2, 81920])
|
19226 |
+
batch tensor after cp: loss_mask torch.Size([2, 81920])
|
19227 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
|
19228 |
+
batch tensor after cp: position_ids torch.Size([2, 81920])
|
19229 |
+
batch tensor: tokens torch.Size([2, 163840])
|
19230 |
+
batch tensor: labels torch.Size([2, 163840])
|
19231 |
+
batch tensor: loss_mask torch.Size([2, 163840])
|
19232 |
+
batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
|
19233 |
+
batch tensor: position_ids torch.Size([2, 163840])
|
19234 |
+
batch tensor after cp: tokens torch.Size([2, 81920])
|
19235 |
+
batch tensor after cp: labels torch.Size([2, 81920])
|
19236 |
+
batch tensor after cp: loss_mask torch.Size([2, 81920])
|
19237 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
|
19238 |
+
batch tensor after cp: position_ids torch.Size([2, 81920])
|
19239 |
+
batch tensor: tokens torch.Size([2, 163840])
|
19240 |
+
batch tensor: labels torch.Size([2, 163840])
|
19241 |
+
batch tensor: loss_mask torch.Size([2, 163840])
|
19242 |
+
batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
|
19243 |
+
batch tensor: position_ids torch.Size([2, 163840])
|
19244 |
+
batch tensor after cp: tokens torch.Size([2, 81920])
|
19245 |
+
batch tensor after cp: labels torch.Size([2, 81920])
|
19246 |
+
batch tensor after cp: loss_mask torch.Size([2, 81920])
|
19247 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
|
19248 |
+
batch tensor after cp: position_ids torch.Size([2, 81920])
|
19249 |
+
batch tensor: tokens torch.Size([2, 163840])
|
19250 |
+
batch tensor: labels torch.Size([2, 163840])
|
19251 |
+
batch tensor: loss_mask torch.Size([2, 163840])
|
19252 |
+
batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
|
19253 |
+
batch tensor: position_ids torch.Size([2, 163840])
|
19254 |
+
batch tensor after cp: tokens torch.Size([2, 81920])
|
19255 |
+
batch tensor after cp: labels torch.Size([2, 81920])
|
19256 |
+
batch tensor after cp: loss_mask torch.Size([2, 81920])
|
19257 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
|
19258 |
+
batch tensor after cp: position_ids torch.Size([2, 81920])
|
19259 |
+
batch tensor: tokens torch.Size([2, 163840])
|
19260 |
+
batch tensor: labels torch.Size([2, 163840])
|
19261 |
+
batch tensor: loss_mask torch.Size([2, 163840])
|
19262 |
+
batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
|
19263 |
+
batch tensor: position_ids torch.Size([2, 163840])
|
19264 |
+
batch tensor after cp: tokens torch.Size([2, 81920])
|
19265 |
+
batch tensor after cp: labels torch.Size([2, 81920])
|
19266 |
+
batch tensor after cp: loss_mask torch.Size([2, 81920])
|
19267 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
|
19268 |
+
batch tensor after cp: position_ids torch.Size([2, 81920])
|
19269 |
+
batch tensor: tokens torch.Size([2, 163840])
|
19270 |
+
batch tensor: labels torch.Size([2, 163840])
|
19271 |
+
batch tensor: loss_mask torch.Size([2, 163840])
|
19272 |
+
batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
|
19273 |
+
batch tensor: position_ids torch.Size([2, 163840])
|
19274 |
+
batch tensor after cp: tokens torch.Size([2, 81920])
|
19275 |
+
batch tensor after cp: labels torch.Size([2, 81920])
|
19276 |
+
batch tensor after cp: loss_mask torch.Size([2, 81920])
|
19277 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
|
19278 |
+
batch tensor after cp: position_ids torch.Size([2, 81920])
|
19279 |
+
batch tensor: tokens torch.Size([2, 163840])
|
19280 |
+
batch tensor: labels torch.Size([2, 163840])
|
19281 |
+
batch tensor: loss_mask torch.Size([2, 163840])
|
19282 |
+
batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
|
19283 |
+
batch tensor: position_ids torch.Size([2, 163840])
|
19284 |
+
batch tensor after cp: tokens torch.Size([2, 81920])
|
19285 |
+
batch tensor after cp: labels torch.Size([2, 81920])
|
19286 |
+
batch tensor after cp: loss_mask torch.Size([2, 81920])
|
19287 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
|
19288 |
+
batch tensor after cp: position_ids torch.Size([2, 81920])
|
19289 |
+
batch tensor: tokens torch.Size([2, 163840])
|
19290 |
+
batch tensor: labels torch.Size([2, 163840])
|
19291 |
+
batch tensor: loss_mask torch.Size([2, 163840])
|
19292 |
+
batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
|
19293 |
+
batch tensor: position_ids torch.Size([2, 163840])
|
19294 |
+
batch tensor after cp: tokens torch.Size([2, 81920])
|
19295 |
+
batch tensor after cp: labels torch.Size([2, 81920])
|
19296 |
+
batch tensor after cp: loss_mask torch.Size([2, 81920])
|
19297 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
|
19298 |
+
batch tensor after cp: position_ids torch.Size([2, 81920])
|
19299 |
+
Start exporting trace 3
|
19300 |
+
Done exporting trace 3
|
19301 |
+
[2025-06-21 22:10:53] iteration 4/ 10 | consumed samples: 4 | elapsed time per iteration (ms): 82310.0 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 536870912.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
|
19302 |
+
batch tensor: tokens torch.Size([2, 163840])
|
19303 |
+
batch tensor: labels torch.Size([2, 163840])
|
19304 |
+
batch tensor: loss_mask torch.Size([2, 163840])
|
19305 |
+
batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
|
19306 |
+
batch tensor: position_ids torch.Size([2, 163840])
|
19307 |
+
batch tensor after cp: tokens torch.Size([2, 81920])
|
19308 |
+
batch tensor after cp: labels torch.Size([2, 81920])
|
19309 |
+
batch tensor after cp: loss_mask torch.Size([2, 81920])
|
19310 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
|
19311 |
+
batch tensor after cp: position_ids torch.Size([2, 81920])
|
19312 |
+
batch tensor: tokens torch.Size([2, 163840])
|
19313 |
+
batch tensor: labels torch.Size([2, 163840])
|
19314 |
+
batch tensor: loss_mask torch.Size([2, 163840])
|
19315 |
+
batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
|
19316 |
+
batch tensor: position_ids torch.Size([2, 163840])
|
19317 |
+
batch tensor after cp: tokens torch.Size([2, 81920])
|
19318 |
+
batch tensor after cp: labels torch.Size([2, 81920])
|
19319 |
+
batch tensor after cp: loss_mask torch.Size([2, 81920])
|
19320 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
|
19321 |
+
batch tensor after cp: position_ids torch.Size([2, 81920])
|
19322 |
+
batch tensor: tokens torch.Size([2, 163840])
|
19323 |
+
batch tensor: labels torch.Size([2, 163840])
|
19324 |
+
batch tensor: loss_mask torch.Size([2, 163840])
|
19325 |
+
batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
|
19326 |
+
batch tensor: position_ids torch.Size([2, 163840])
|
19327 |
+
batch tensor after cp: tokens torch.Size([2, 81920])
|
19328 |
+
batch tensor after cp: labels torch.Size([2, 81920])
|
19329 |
+
batch tensor after cp: loss_mask torch.Size([2, 81920])
|
19330 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
|
19331 |
+
batch tensor after cp: position_ids torch.Size([2, 81920])
|
19332 |
+
batch tensor: tokens torch.Size([2, 163840])
|
19333 |
+
batch tensor: labels torch.Size([2, 163840])
|
19334 |
+
batch tensor: loss_mask torch.Size([2, 163840])
|
19335 |
+
batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
|
19336 |
+
batch tensor: position_ids torch.Size([2, 163840])
|
19337 |
+
batch tensor after cp: tokens torch.Size([2, 81920])
|
19338 |
+
batch tensor after cp: labels torch.Size([2, 81920])
|
19339 |
+
batch tensor after cp: loss_mask torch.Size([2, 81920])
|
19340 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
|
19341 |
+
batch tensor after cp: position_ids torch.Size([2, 81920])
|
19342 |
+
batch tensor: tokens torch.Size([2, 163840])
|
19343 |
+
batch tensor: labels torch.Size([2, 163840])
|
19344 |
+
batch tensor: loss_mask torch.Size([2, 163840])
|
19345 |
+
batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
|
19346 |
+
batch tensor: position_ids torch.Size([2, 163840])
|
19347 |
+
batch tensor after cp: tokens torch.Size([2, 81920])
|
19348 |
+
batch tensor after cp: labels torch.Size([2, 81920])
|
19349 |
+
batch tensor after cp: loss_mask torch.Size([2, 81920])
|
19350 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
|
19351 |
+
batch tensor after cp: position_ids torch.Size([2, 81920])
|
19352 |
+
batch tensor: tokens torch.Size([2, 163840])
|
19353 |
+
batch tensor: labels torch.Size([2, 163840])
|
19354 |
+
batch tensor: loss_mask torch.Size([2, 163840])
|
19355 |
+
batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
|
19356 |
+
batch tensor: position_ids torch.Size([2, 163840])
|
19357 |
+
batch tensor after cp: tokens torch.Size([2, 81920])
|
19358 |
+
batch tensor after cp: labels torch.Size([2, 81920])
|
19359 |
+
batch tensor after cp: loss_mask torch.Size([2, 81920])
|
19360 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
|
19361 |
+
batch tensor after cp: position_ids torch.Size([2, 81920])
|
19362 |
+
batch tensor: tokens torch.Size([2, 163840])
|
19363 |
+
batch tensor: labels torch.Size([2, 163840])
|
19364 |
+
batch tensor: loss_mask torch.Size([2, 163840])
|
19365 |
+
batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
|
19366 |
+
batch tensor: position_ids torch.Size([2, 163840])
|
19367 |
+
batch tensor after cp: tokens torch.Size([2, 81920])
|
19368 |
+
batch tensor after cp: labels torch.Size([2, 81920])
|
19369 |
+
batch tensor after cp: loss_mask torch.Size([2, 81920])
|
19370 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
|
19371 |
+
batch tensor after cp: position_ids torch.Size([2, 81920])
|
19372 |
+
batch tensor: tokens torch.Size([2, 163840])
|
19373 |
+
batch tensor: labels torch.Size([2, 163840])
|
19374 |
+
batch tensor: loss_mask torch.Size([2, 163840])
|
19375 |
+
batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
|
19376 |
+
batch tensor: position_ids torch.Size([2, 163840])
|
19377 |
+
batch tensor after cp: tokens torch.Size([2, 81920])
|
19378 |
+
batch tensor after cp: labels torch.Size([2, 81920])
|
19379 |
+
batch tensor after cp: loss_mask torch.Size([2, 81920])
|
19380 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
|
19381 |
+
batch tensor after cp: position_ids torch.Size([2, 81920])
|
19382 |
+
Start exporting trace 4
|
19383 |
+
Done exporting trace 4
|
19384 |
+
[2025-06-21 22:12:14] iteration 5/ 10 | consumed samples: 5 | elapsed time per iteration (ms): 80943.9 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 268435456.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
|
19385 |
+
batch tensor: tokens torch.Size([2, 163840])
|
19386 |
+
batch tensor: labels torch.Size([2, 163840])
|
19387 |
+
batch tensor: loss_mask torch.Size([2, 163840])
|
19388 |
+
batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
|
19389 |
+
batch tensor: position_ids torch.Size([2, 163840])
|
19390 |
+
batch tensor after cp: tokens torch.Size([2, 81920])
|
19391 |
+
batch tensor after cp: labels torch.Size([2, 81920])
|
19392 |
+
batch tensor after cp: loss_mask torch.Size([2, 81920])
|
19393 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
|
19394 |
+
batch tensor after cp: position_ids torch.Size([2, 81920])
|
19395 |
+
batch tensor: tokens torch.Size([2, 163840])
|
19396 |
+
batch tensor: labels torch.Size([2, 163840])
|
19397 |
+
batch tensor: loss_mask torch.Size([2, 163840])
|
19398 |
+
batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
|
19399 |
+
batch tensor: position_ids torch.Size([2, 163840])
|
19400 |
+
batch tensor after cp: tokens torch.Size([2, 81920])
|
19401 |
+
batch tensor after cp: labels torch.Size([2, 81920])
|
19402 |
+
batch tensor after cp: loss_mask torch.Size([2, 81920])
|
19403 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
|
19404 |
+
batch tensor after cp: position_ids torch.Size([2, 81920])
|
19405 |
+
batch tensor: tokens torch.Size([2, 163840])
|
19406 |
+
batch tensor: labels torch.Size([2, 163840])
|
19407 |
+
batch tensor: loss_mask torch.Size([2, 163840])
|
19408 |
+
batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
|
19409 |
+
batch tensor: position_ids torch.Size([2, 163840])
|
19410 |
+
batch tensor after cp: tokens torch.Size([2, 81920])
|
19411 |
+
batch tensor after cp: labels torch.Size([2, 81920])
|
19412 |
+
batch tensor after cp: loss_mask torch.Size([2, 81920])
|
19413 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
|
19414 |
+
batch tensor after cp: position_ids torch.Size([2, 81920])
|
19415 |
+
batch tensor: tokens torch.Size([2, 163840])
|
19416 |
+
batch tensor: labels torch.Size([2, 163840])
|
19417 |
+
batch tensor: loss_mask torch.Size([2, 163840])
|
19418 |
+
batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
|
19419 |
+
batch tensor: position_ids torch.Size([2, 163840])
|
19420 |
+
batch tensor after cp: tokens torch.Size([2, 81920])
|
19421 |
+
batch tensor after cp: labels torch.Size([2, 81920])
|
19422 |
+
batch tensor after cp: loss_mask torch.Size([2, 81920])
|
19423 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
|
19424 |
+
batch tensor after cp: position_ids torch.Size([2, 81920])
|
19425 |
+
batch tensor: tokens torch.Size([2, 163840])
|
19426 |
+
batch tensor: labels torch.Size([2, 163840])
|
19427 |
+
batch tensor: loss_mask torch.Size([2, 163840])
|
19428 |
+
batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
|
19429 |
+
batch tensor: position_ids torch.Size([2, 163840])
|
19430 |
+
batch tensor after cp: tokens torch.Size([2, 81920])
|
19431 |
+
batch tensor after cp: labels torch.Size([2, 81920])
|
19432 |
+
batch tensor after cp: loss_mask torch.Size([2, 81920])
|
19433 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
|
19434 |
+
batch tensor after cp: position_ids torch.Size([2, 81920])
|
19435 |
+
batch tensor: tokens torch.Size([2, 163840])
|
19436 |
+
batch tensor: labels torch.Size([2, 163840])
|
19437 |
+
batch tensor: loss_mask torch.Size([2, 163840])
|
19438 |
+
batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
|
19439 |
+
batch tensor: position_ids torch.Size([2, 163840])
|
19440 |
+
batch tensor after cp: tokens torch.Size([2, 81920])
|
19441 |
+
batch tensor after cp: labels torch.Size([2, 81920])
|
19442 |
+
batch tensor after cp: loss_mask torch.Size([2, 81920])
|
19443 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
|
19444 |
+
batch tensor after cp: position_ids torch.Size([2, 81920])
|
19445 |
+
batch tensor: tokens torch.Size([2, 163840])
|
19446 |
+
batch tensor: labels torch.Size([2, 163840])
|
19447 |
+
batch tensor: loss_mask torch.Size([2, 163840])
|
19448 |
+
batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
|
19449 |
+
batch tensor: position_ids torch.Size([2, 163840])
|
19450 |
+
batch tensor after cp: tokens torch.Size([2, 81920])
|
19451 |
+
batch tensor after cp: labels torch.Size([2, 81920])
|
19452 |
+
batch tensor after cp: loss_mask torch.Size([2, 81920])
|
19453 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
|
19454 |
+
batch tensor after cp: position_ids torch.Size([2, 81920])
|
19455 |
+
batch tensor: tokens torch.Size([2, 163840])
|
19456 |
+
batch tensor: labels torch.Size([2, 163840])
|
19457 |
+
batch tensor: loss_mask torch.Size([2, 163840])
|
19458 |
+
batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
|
19459 |
+
batch tensor: position_ids torch.Size([2, 163840])
|
19460 |
+
batch tensor after cp: tokens torch.Size([2, 81920])
|
19461 |
+
batch tensor after cp: labels torch.Size([2, 81920])
|
19462 |
+
batch tensor after cp: loss_mask torch.Size([2, 81920])
|
19463 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
|
19464 |
+
batch tensor after cp: position_ids torch.Size([2, 81920])
|
19465 |
+
Start exporting trace 5
|
19466 |
+
Done exporting trace 5
|
19467 |
+
[2025-06-21 22:14:09] iteration 6/ 10 | consumed samples: 6 | elapsed time per iteration (ms): 115269.0 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 134217728.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
|
19468 |
+
batch tensor: tokens torch.Size([2, 163840])
|
19469 |
+
batch tensor: labels torch.Size([2, 163840])
|
19470 |
+
batch tensor: loss_mask torch.Size([2, 163840])
|
19471 |
+
batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
|
19472 |
+
batch tensor: position_ids torch.Size([2, 163840])
|
19473 |
+
batch tensor after cp: tokens torch.Size([2, 81920])
|
19474 |
+
batch tensor after cp: labels torch.Size([2, 81920])
|
19475 |
+
batch tensor after cp: loss_mask torch.Size([2, 81920])
|
19476 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
|
19477 |
+
batch tensor after cp: position_ids torch.Size([2, 81920])
|
19478 |
+
batch tensor: tokens torch.Size([2, 163840])
|
19479 |
+
batch tensor: labels torch.Size([2, 163840])
|
19480 |
+
batch tensor: loss_mask torch.Size([2, 163840])
|
19481 |
+
batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
|
19482 |
+
batch tensor: position_ids torch.Size([2, 163840])
|
19483 |
+
batch tensor after cp: tokens torch.Size([2, 81920])
|
19484 |
+
batch tensor after cp: labels torch.Size([2, 81920])
|
19485 |
+
batch tensor after cp: loss_mask torch.Size([2, 81920])
|
19486 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
|
19487 |
+
batch tensor after cp: position_ids torch.Size([2, 81920])
|
19488 |
+
batch tensor: tokens torch.Size([2, 163840])
|
19489 |
+
batch tensor: labels torch.Size([2, 163840])
|
19490 |
+
batch tensor: loss_mask torch.Size([2, 163840])
|
19491 |
+
batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
|
19492 |
+
batch tensor: position_ids torch.Size([2, 163840])
|
19493 |
+
batch tensor after cp: tokens torch.Size([2, 81920])
|
19494 |
+
batch tensor after cp: labels torch.Size([2, 81920])
|
19495 |
+
batch tensor after cp: loss_mask torch.Size([2, 81920])
|
19496 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
|
19497 |
+
batch tensor after cp: position_ids torch.Size([2, 81920])
|
19498 |
+
batch tensor: tokens torch.Size([2, 163840])
|
19499 |
+
batch tensor: labels torch.Size([2, 163840])
|
19500 |
+
batch tensor: loss_mask torch.Size([2, 163840])
|
19501 |
+
batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
|
19502 |
+
batch tensor: position_ids torch.Size([2, 163840])
|
19503 |
+
batch tensor after cp: tokens torch.Size([2, 81920])
|
19504 |
+
batch tensor after cp: labels torch.Size([2, 81920])
|
19505 |
+
batch tensor after cp: loss_mask torch.Size([2, 81920])
|
19506 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
|
19507 |
+
batch tensor after cp: position_ids torch.Size([2, 81920])
|
19508 |
+
batch tensor: tokens torch.Size([2, 163840])
|
19509 |
+
batch tensor: labels torch.Size([2, 163840])
|
19510 |
+
batch tensor: loss_mask torch.Size([2, 163840])
|
19511 |
+
batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
|
19512 |
+
batch tensor: position_ids torch.Size([2, 163840])
|
19513 |
+
batch tensor after cp: tokens torch.Size([2, 81920])
|
19514 |
+
batch tensor after cp: labels torch.Size([2, 81920])
|
19515 |
+
batch tensor after cp: loss_mask torch.Size([2, 81920])
|
19516 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
|
19517 |
+
batch tensor after cp: position_ids torch.Size([2, 81920])
|
19518 |
+
batch tensor: tokens torch.Size([2, 163840])
|
19519 |
+
batch tensor: labels torch.Size([2, 163840])
|
19520 |
+
batch tensor: loss_mask torch.Size([2, 163840])
|
19521 |
+
batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
|
19522 |
+
batch tensor: position_ids torch.Size([2, 163840])
|
19523 |
+
batch tensor after cp: tokens torch.Size([2, 81920])
|
19524 |
+
batch tensor after cp: labels torch.Size([2, 81920])
|
19525 |
+
batch tensor after cp: loss_mask torch.Size([2, 81920])
|
19526 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
|
19527 |
+
batch tensor after cp: position_ids torch.Size([2, 81920])
|
19528 |
+
batch tensor: tokens torch.Size([2, 163840])
|
19529 |
+
batch tensor: labels torch.Size([2, 163840])
|
19530 |
+
batch tensor: loss_mask torch.Size([2, 163840])
|
19531 |
+
batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
|
19532 |
+
batch tensor: position_ids torch.Size([2, 163840])
|
19533 |
+
batch tensor after cp: tokens torch.Size([2, 81920])
|
19534 |
+
batch tensor after cp: labels torch.Size([2, 81920])
|
19535 |
+
batch tensor after cp: loss_mask torch.Size([2, 81920])
|
19536 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
|
19537 |
+
batch tensor after cp: position_ids torch.Size([2, 81920])
|
19538 |
+
batch tensor: tokens torch.Size([2, 163840])
|
19539 |
+
batch tensor: labels torch.Size([2, 163840])
|
19540 |
+
batch tensor: loss_mask torch.Size([2, 163840])
|
19541 |
+
batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
|
19542 |
+
batch tensor: position_ids torch.Size([2, 163840])
|
19543 |
+
batch tensor after cp: tokens torch.Size([2, 81920])
|
19544 |
+
batch tensor after cp: labels torch.Size([2, 81920])
|
19545 |
+
batch tensor after cp: loss_mask torch.Size([2, 81920])
|
19546 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
|
19547 |
+
batch tensor after cp: position_ids torch.Size([2, 81920])
|
19548 |
+
Start exporting trace 6
|
19549 |
+
Done exporting trace 6
|
19550 |
+
[2025-06-21 22:15:29] iteration 7/ 10 | consumed samples: 7 | elapsed time per iteration (ms): 79840.2 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 67108864.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
|
19551 |
+
batch tensor: tokens torch.Size([2, 163840])
|
19552 |
+
batch tensor: labels torch.Size([2, 163840])
|
19553 |
+
batch tensor: loss_mask torch.Size([2, 163840])
|
19554 |
+
batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
|
19555 |
+
batch tensor: position_ids torch.Size([2, 163840])
|
19556 |
+
batch tensor after cp: tokens torch.Size([2, 81920])
|
19557 |
+
batch tensor after cp: labels torch.Size([2, 81920])
|
19558 |
+
batch tensor after cp: loss_mask torch.Size([2, 81920])
|
19559 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
|
19560 |
+
batch tensor after cp: position_ids torch.Size([2, 81920])
|
19561 |
+
batch tensor: tokens torch.Size([2, 163840])
|
19562 |
+
batch tensor: labels torch.Size([2, 163840])
|
19563 |
+
batch tensor: loss_mask torch.Size([2, 163840])
|
19564 |
+
batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
|
19565 |
+
batch tensor: position_ids torch.Size([2, 163840])
|
19566 |
+
batch tensor after cp: tokens torch.Size([2, 81920])
|
19567 |
+
batch tensor after cp: labels torch.Size([2, 81920])
|
19568 |
+
batch tensor after cp: loss_mask torch.Size([2, 81920])
|
19569 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
|
19570 |
+
batch tensor after cp: position_ids torch.Size([2, 81920])
|
19571 |
+
batch tensor: tokens torch.Size([2, 163840])
|
19572 |
+
batch tensor: labels torch.Size([2, 163840])
|
19573 |
+
batch tensor: loss_mask torch.Size([2, 163840])
|
19574 |
+
batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
|
19575 |
+
batch tensor: position_ids torch.Size([2, 163840])
|
19576 |
+
batch tensor after cp: tokens torch.Size([2, 81920])
|
19577 |
+
batch tensor after cp: labels torch.Size([2, 81920])
|
19578 |
+
batch tensor after cp: loss_mask torch.Size([2, 81920])
|
19579 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
|
19580 |
+
batch tensor after cp: position_ids torch.Size([2, 81920])
|
19581 |
+
batch tensor: tokens torch.Size([2, 163840])
|
19582 |
+
batch tensor: labels torch.Size([2, 163840])
|
19583 |
+
batch tensor: loss_mask torch.Size([2, 163840])
|
19584 |
+
batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
|
19585 |
+
batch tensor: position_ids torch.Size([2, 163840])
|
19586 |
+
batch tensor after cp: tokens torch.Size([2, 81920])
|
19587 |
+
batch tensor after cp: labels torch.Size([2, 81920])
|
19588 |
+
batch tensor after cp: loss_mask torch.Size([2, 81920])
|
19589 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
|
19590 |
+
batch tensor after cp: position_ids torch.Size([2, 81920])
|
19591 |
+
batch tensor: tokens torch.Size([2, 163840])
|
19592 |
+
batch tensor: labels torch.Size([2, 163840])
|
19593 |
+
batch tensor: loss_mask torch.Size([2, 163840])
|
19594 |
+
batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
|
19595 |
+
batch tensor: position_ids torch.Size([2, 163840])
|
19596 |
+
batch tensor after cp: tokens torch.Size([2, 81920])
|
19597 |
+
batch tensor after cp: labels torch.Size([2, 81920])
|
19598 |
+
batch tensor after cp: loss_mask torch.Size([2, 81920])
|
19599 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
|
19600 |
+
batch tensor after cp: position_ids torch.Size([2, 81920])
|
19601 |
+
batch tensor: tokens torch.Size([2, 163840])
|
19602 |
+
batch tensor: labels torch.Size([2, 163840])
|
19603 |
+
batch tensor: loss_mask torch.Size([2, 163840])
|
19604 |
+
batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
|
19605 |
+
batch tensor: position_ids torch.Size([2, 163840])
|
19606 |
+
batch tensor after cp: tokens torch.Size([2, 81920])
|
19607 |
+
batch tensor after cp: labels torch.Size([2, 81920])
|
19608 |
+
batch tensor after cp: loss_mask torch.Size([2, 81920])
|
19609 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
|
19610 |
+
batch tensor after cp: position_ids torch.Size([2, 81920])
|
19611 |
+
batch tensor: tokens torch.Size([2, 163840])
|
19612 |
+
batch tensor: labels torch.Size([2, 163840])
|
19613 |
+
batch tensor: loss_mask torch.Size([2, 163840])
|
19614 |
+
batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
|
19615 |
+
batch tensor: position_ids torch.Size([2, 163840])
|
19616 |
+
batch tensor after cp: tokens torch.Size([2, 81920])
|
19617 |
+
batch tensor after cp: labels torch.Size([2, 81920])
|
19618 |
+
batch tensor after cp: loss_mask torch.Size([2, 81920])
|
19619 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
|
19620 |
+
batch tensor after cp: position_ids torch.Size([2, 81920])
|
19621 |
+
batch tensor: tokens torch.Size([2, 163840])
|
19622 |
+
batch tensor: labels torch.Size([2, 163840])
|
19623 |
+
batch tensor: loss_mask torch.Size([2, 163840])
|
19624 |
+
batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
|
19625 |
+
batch tensor: position_ids torch.Size([2, 163840])
|
19626 |
+
batch tensor after cp: tokens torch.Size([2, 81920])
|
19627 |
+
batch tensor after cp: labels torch.Size([2, 81920])
|
19628 |
+
batch tensor after cp: loss_mask torch.Size([2, 81920])
|
19629 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
|
19630 |
+
batch tensor after cp: position_ids torch.Size([2, 81920])
|
19631 |
+
Start exporting trace 7
|
19632 |
+
Done exporting trace 7
|
19633 |
+
[2025-06-21 22:16:57] iteration 8/ 10 | consumed samples: 8 | elapsed time per iteration (ms): 88355.1 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 33554432.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
|
19634 |
+
batch tensor: tokens torch.Size([2, 163840])
|
19635 |
+
batch tensor: labels torch.Size([2, 163840])
|
19636 |
+
batch tensor: loss_mask torch.Size([2, 163840])
|
19637 |
+
batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
|
19638 |
+
batch tensor: position_ids torch.Size([2, 163840])
|
19639 |
+
batch tensor after cp: tokens torch.Size([2, 81920])
|
19640 |
+
batch tensor after cp: labels torch.Size([2, 81920])
|
19641 |
+
batch tensor after cp: loss_mask torch.Size([2, 81920])
|
19642 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
|
19643 |
+
batch tensor after cp: position_ids torch.Size([2, 81920])
|
19644 |
+
batch tensor: tokens torch.Size([2, 163840])
|
19645 |
+
batch tensor: labels torch.Size([2, 163840])
|
19646 |
+
batch tensor: loss_mask torch.Size([2, 163840])
|
19647 |
+
batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
|
19648 |
+
batch tensor: position_ids torch.Size([2, 163840])
|
19649 |
+
batch tensor after cp: tokens torch.Size([2, 81920])
|
19650 |
+
+batch tensor after cp: labels torch.Size([2, 81920])
+batch tensor after cp: loss_mask torch.Size([2, 81920])
+batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
+batch tensor after cp: position_ids torch.Size([2, 81920])
+batch tensor: tokens torch.Size([2, 163840])
+batch tensor: labels torch.Size([2, 163840])
+batch tensor: loss_mask torch.Size([2, 163840])
+batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
+batch tensor: position_ids torch.Size([2, 163840])
+batch tensor after cp: tokens torch.Size([2, 81920])
+batch tensor after cp: labels torch.Size([2, 81920])
+batch tensor after cp: loss_mask torch.Size([2, 81920])
+batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
+batch tensor after cp: position_ids torch.Size([2, 81920])
+batch tensor: tokens torch.Size([2, 163840])
+batch tensor: labels torch.Size([2, 163840])
+batch tensor: loss_mask torch.Size([2, 163840])
+batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
+batch tensor: position_ids torch.Size([2, 163840])
+batch tensor after cp: tokens torch.Size([2, 81920])
+batch tensor after cp: labels torch.Size([2, 81920])
+batch tensor after cp: loss_mask torch.Size([2, 81920])
+batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
+batch tensor after cp: position_ids torch.Size([2, 81920])
+batch tensor: tokens torch.Size([2, 163840])
+batch tensor: labels torch.Size([2, 163840])
+batch tensor: loss_mask torch.Size([2, 163840])
+batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
+batch tensor: position_ids torch.Size([2, 163840])
+batch tensor after cp: tokens torch.Size([2, 81920])
+batch tensor after cp: labels torch.Size([2, 81920])
+batch tensor after cp: loss_mask torch.Size([2, 81920])
+batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
+batch tensor after cp: position_ids torch.Size([2, 81920])
+batch tensor: tokens torch.Size([2, 163840])
+batch tensor: labels torch.Size([2, 163840])
+batch tensor: loss_mask torch.Size([2, 163840])
+batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
+batch tensor: position_ids torch.Size([2, 163840])
+batch tensor after cp: tokens torch.Size([2, 81920])
+batch tensor after cp: labels torch.Size([2, 81920])
+batch tensor after cp: loss_mask torch.Size([2, 81920])
+batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
+batch tensor after cp: position_ids torch.Size([2, 81920])
+batch tensor: tokens torch.Size([2, 163840])
+batch tensor: labels torch.Size([2, 163840])
+batch tensor: loss_mask torch.Size([2, 163840])
+batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
+batch tensor: position_ids torch.Size([2, 163840])
+batch tensor after cp: tokens torch.Size([2, 81920])
+batch tensor after cp: labels torch.Size([2, 81920])
+batch tensor after cp: loss_mask torch.Size([2, 81920])
+batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
+batch tensor after cp: position_ids torch.Size([2, 81920])
+batch tensor: tokens torch.Size([2, 163840])
+batch tensor: labels torch.Size([2, 163840])
+batch tensor: loss_mask torch.Size([2, 163840])
+batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
+batch tensor: position_ids torch.Size([2, 163840])
+batch tensor after cp: tokens torch.Size([2, 81920])
+batch tensor after cp: labels torch.Size([2, 81920])
+batch tensor after cp: loss_mask torch.Size([2, 81920])
+batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
+batch tensor after cp: position_ids torch.Size([2, 81920])
+Start exporting trace 8
+Done exporting trace 8
+[2025-06-21 22:18:29] iteration 9/ 10 | consumed samples: 9 | elapsed time per iteration (ms): 91688.4 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 16777216.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([2, 163840])
+batch tensor: labels torch.Size([2, 163840])
+batch tensor: loss_mask torch.Size([2, 163840])
+batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
+batch tensor: position_ids torch.Size([2, 163840])
+batch tensor after cp: tokens torch.Size([2, 81920])
+batch tensor after cp: labels torch.Size([2, 81920])
+batch tensor after cp: loss_mask torch.Size([2, 81920])
+batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
+batch tensor after cp: position_ids torch.Size([2, 81920])
+batch tensor: tokens torch.Size([2, 163840])
+batch tensor: labels torch.Size([2, 163840])
+batch tensor: loss_mask torch.Size([2, 163840])
+batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
+batch tensor: position_ids torch.Size([2, 163840])
+batch tensor after cp: tokens torch.Size([2, 81920])
+batch tensor after cp: labels torch.Size([2, 81920])
+batch tensor after cp: loss_mask torch.Size([2, 81920])
+batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
+batch tensor after cp: position_ids torch.Size([2, 81920])
+batch tensor: tokens torch.Size([2, 163840])
+batch tensor: labels torch.Size([2, 163840])
+batch tensor: loss_mask torch.Size([2, 163840])
+batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
+batch tensor: position_ids torch.Size([2, 163840])
+batch tensor after cp: tokens torch.Size([2, 81920])
+batch tensor after cp: labels torch.Size([2, 81920])
+batch tensor after cp: loss_mask torch.Size([2, 81920])
+batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
+batch tensor after cp: position_ids torch.Size([2, 81920])
+batch tensor: tokens torch.Size([2, 163840])
+batch tensor: labels torch.Size([2, 163840])
+batch tensor: loss_mask torch.Size([2, 163840])
+batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
+batch tensor: position_ids torch.Size([2, 163840])
+batch tensor after cp: tokens torch.Size([2, 81920])
+batch tensor after cp: labels torch.Size([2, 81920])
+batch tensor after cp: loss_mask torch.Size([2, 81920])
+batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
+batch tensor after cp: position_ids torch.Size([2, 81920])
+batch tensor: tokens torch.Size([2, 163840])
+batch tensor: labels torch.Size([2, 163840])
+batch tensor: loss_mask torch.Size([2, 163840])
+batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
+batch tensor: position_ids torch.Size([2, 163840])
+batch tensor after cp: tokens torch.Size([2, 81920])
+batch tensor after cp: labels torch.Size([2, 81920])
+batch tensor after cp: loss_mask torch.Size([2, 81920])
+batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
+batch tensor after cp: position_ids torch.Size([2, 81920])
+batch tensor: tokens torch.Size([2, 163840])
+batch tensor: labels torch.Size([2, 163840])
+batch tensor: loss_mask torch.Size([2, 163840])
+batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
+batch tensor: position_ids torch.Size([2, 163840])
+batch tensor after cp: tokens torch.Size([2, 81920])
+batch tensor after cp: labels torch.Size([2, 81920])
+batch tensor after cp: loss_mask torch.Size([2, 81920])
+batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
+batch tensor after cp: position_ids torch.Size([2, 81920])
+batch tensor: tokens torch.Size([2, 163840])
+batch tensor: labels torch.Size([2, 163840])
+batch tensor: loss_mask torch.Size([2, 163840])
+batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
+batch tensor: position_ids torch.Size([2, 163840])
+batch tensor after cp: tokens torch.Size([2, 81920])
+batch tensor after cp: labels torch.Size([2, 81920])
+batch tensor after cp: loss_mask torch.Size([2, 81920])
+batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
+batch tensor after cp: position_ids torch.Size([2, 81920])
attnserver.run_attnserver.slurm.sh.343237.err.log
CHANGED
@@ -2470,3 +2470,346 @@ W0621 22:05:02.863000 1127779 site-packages/torch/distributed/run.py:766] ******
warnings.warn(
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
warnings.warn(
+[rank0]: Traceback (most recent call last):
+[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
+[rank0]: pretrain(
+[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 879, in pretrain
+[rank0]: save_checkpoint(
+[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 469, in save_checkpoint
+[rank0]: async_save_request = dist_checkpointing.save(state_dict, checkpoint_name, save_strategy,
+[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 386, in save
+[rank0]: common_strategy.save_common(state_dict, checkpoint_dir)
+[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/common.py", line 48, in save_common
+[rank0]: torch.save(common_state_dict, path)
+[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/serialization.py", line 964, in save
+[rank0]: with _open_zipfile_writer(f) as opened_zipfile:
+[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^
+[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/serialization.py", line 828, in _open_zipfile_writer
+[rank0]: return container(name_or_buffer)
+[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/serialization.py", line 792, in __init__
+[rank0]: torch._C.PyTorchFileWriter(
+[rank0]: RuntimeError: Parent directory gpt-checkpoint/iter_0000010 does not exist.
+[rank0]:[W621 22:13:20.782763803 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+W0621 22:13:38.457000 853291 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 853380 closing signal SIGTERM
+W0621 22:13:38.459000 853291 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 853381 closing signal SIGTERM
+W0621 22:13:38.464000 853291 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 853382 closing signal SIGTERM
+W0621 22:13:38.468000 853291 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 853383 closing signal SIGTERM
+W0621 22:13:38.479000 853291 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 853384 closing signal SIGTERM
+W0621 22:13:38.487000 853291 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 853385 closing signal SIGTERM
+W0621 22:13:38.502000 853291 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 853386 closing signal SIGTERM
+E0621 22:13:46.197000 853291 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 0 (pid: 853379) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
+Traceback (most recent call last):
+File "<frozen runpy>", line 198, in _run_module_as_main
+File "<frozen runpy>", line 88, in _run_code
+File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in <module>
+main()
+File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
+return arg(*args, **kwargs)
+^^^^^^^^^^^^^^^^^^^^
+File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main
+launch(args)
+File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch
+run(args)
+File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run
+elastic_launch(
+File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__
+return launch_agent(self._config, self._entrypoint, list(args))
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent
+raise ChildFailedError(
+torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
+============================================================
+./pretrain_gpt_profile.py FAILED
+------------------------------------------------------------
+Failures:
+<NO_OTHER_FAILURES>
+------------------------------------------------------------
+Root Cause (first observed failure):
+[0]:
+time : 2025-06-21_22:13:38
+host : fs-mbz-gpu-274
+rank : 0 (local_rank: 0)
+exitcode : 1 (pid: 853379)
+error_file: <N/A>
+traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
+============================================================
2538 |
+
[rank15]:[W621 22:13:46.250610899 TCPStore.cpp:125] [c10d] recvValue failed on SocketImpl(fd=95, addr=[fs-mbz-gpu-476]:51028, remote=[fs-mbz-gpu-274]:42927): failed to recv, got 0 bytes
|
2539 |
+
Exception raised from recvBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:678 (most recent call first):
|
2540 |
+
frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >) + 0x98 (0x1467bd1785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
|
2541 |
+
frame #1: <unknown function> + 0x5ba8afe (0x1467a645aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
2542 |
+
frame #2: <unknown function> + 0x5baae40 (0x1467a645ce40 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
2543 |
+
frame #3: <unknown function> + 0x5bab74a (0x1467a645d74a in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
2544 |
+
frame #4: c10d::TCPStore::check(std::vector<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >, std::allocator<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > > > const&) + 0x2a9 (0x1467a64571a9 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
2545 |
+
frame #5: c10d::ProcessGroupNCCL::heartbeatMonitor() + 0x379 (0x1467636509a9 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cuda.so)
|
2546 |
+
frame #6: <unknown function> + 0xd3b6d (0x146753619b6d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/../lib/libstdc++.so.6)
|
2547 |
+
frame #7: <unknown function> + 0x94ac3 (0x1467be54dac3 in /lib/x86_64-linux-gnu/libc.so.6)
|
2548 |
+
frame #8: <unknown function> + 0x126850 (0x1467be5df850 in /lib/x86_64-linux-gnu/libc.so.6)
|
2549 |
+
|
2550 |
+
[rank15]:[W621 22:13:46.259342213 ProcessGroupNCCL.cpp:1659] [PG ID 0 PG GUID 0(default_pg) Rank 15] Failed to check the "should dump" flag on TCPStore, (maybe TCPStore server has shut down too early), with error: failed to recv, got 0 bytes
|
2551 |
+
[rank9]:[W621 22:13:46.306709118 TCPStore.cpp:125] [c10d] recvValue failed on SocketImpl(fd=95, addr=[fs-mbz-gpu-476]:50982, remote=[fs-mbz-gpu-274]:42927): failed to recv, got 0 bytes
|
2552 |
+
Exception raised from recvBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:678 (most recent call first):
|
2553 |
+
frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >) + 0x98 (0x145c133785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
|
2554 |
+
frame #1: <unknown function> + 0x5ba8afe (0x145bfc25aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
2555 |
+
frame #2: <unknown function> + 0x5baae40 (0x145bfc25ce40 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
2556 |
+
frame #3: <unknown function> + 0x5bab74a (0x145bfc25d74a in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
2557 |
+
frame #4: c10d::TCPStore::check(std::vector<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >, std::allocator<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > > > const&) + 0x2a9 (0x145bfc2571a9 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
2558 |
+
frame #5: c10d::ProcessGroupNCCL::heartbeatMonitor() + 0x379 (0x145bb94509a9 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cuda.so)
|
2559 |
+
frame #6: <unknown function> + 0xd3b6d (0x145c12ef1b6d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/../lib/libstdc++.so.6)
|
2560 |
+
frame #7: <unknown function> + 0x94ac3 (0x145c1441dac3 in /lib/x86_64-linux-gnu/libc.so.6)
|
2561 |
+
frame #8: <unknown function> + 0x126850 (0x145c144af850 in /lib/x86_64-linux-gnu/libc.so.6)
|
2562 |
+
|
2563 |
+
[rank9]:[W621 22:13:46.310755934 ProcessGroupNCCL.cpp:1659] [PG ID 0 PG GUID 0(default_pg) Rank 9] Failed to check the "should dump" flag on TCPStore, (maybe TCPStore server has shut down too early), with error: failed to recv, got 0 bytes
|
2564 |
+
[rank11]:[W621 22:13:46.306742788 TCPStore.cpp:125] [c10d] recvValue failed on SocketImpl(fd=95, addr=[fs-mbz-gpu-476]:51022, remote=[fs-mbz-gpu-274]:42927): failed to recv, got 0 bytes
|
2565 |
+
Exception raised from recvBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:678 (most recent call first):
|
2566 |
+
frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >) + 0x98 (0x148aabf785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
|
2567 |
+
frame #1: <unknown function> + 0x5ba8afe (0x148a94e5aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
2568 |
+
frame #2: <unknown function> + 0x5baae40 (0x148a94e5ce40 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
2569 |
+
frame #3: <unknown function> + 0x5bab74a (0x148a94e5d74a in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
2570 |
+
frame #4: c10d::TCPStore::check(std::vector<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >, std::allocator<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > > > const&) + 0x2a9 (0x148a94e571a9 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
2571 |
+
frame #5: c10d::ProcessGroupNCCL::heartbeatMonitor() + 0x379 (0x148a520509a9 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cuda.so)
|
2572 |
+
frame #6: <unknown function> + 0xd3b6d (0x148aabaf1b6d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/../lib/libstdc++.so.6)
|
2573 |
+
frame #7: <unknown function> + 0x94ac3 (0x148aacff4ac3 in /lib/x86_64-linux-gnu/libc.so.6)
|
2574 |
+
frame #8: <unknown function> + 0x126850 (0x148aad086850 in /lib/x86_64-linux-gnu/libc.so.6)
|
2575 |
+
|
2576 |
+
[rank11]:[W621 22:13:46.311289102 ProcessGroupNCCL.cpp:1659] [PG ID 0 PG GUID 0(default_pg) Rank 11] Failed to check the "should dump" flag on TCPStore, (maybe TCPStore server has shut down too early), with error: failed to recv, got 0 bytes
|
++ set +x
+W0621 22:13:46.650000 1127779 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1127859 closing signal SIGTERM
+W0621 22:13:46.653000 1127779 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1127860 closing signal SIGTERM
+W0621 22:13:46.656000 1127779 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1127861 closing signal SIGTERM
+W0621 22:13:46.660000 1127779 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1127862 closing signal SIGTERM
+W0621 22:13:46.680000 1127779 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1127863 closing signal SIGTERM
+W0621 22:13:46.694000 1127779 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1127864 closing signal SIGTERM
+W0621 22:13:46.727000 1127779 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1127865 closing signal SIGTERM
+W0621 22:13:46.741000 1127779 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1127866 closing signal SIGTERM
2586 |
+
[W621 22:13:48.475009412 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-476]:33128, remote=[fs-mbz-gpu-274]:29500): Broken pipe
|
2587 |
+
Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first):
|
2588 |
+
frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >) + 0x98 (0x14777cb785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
|
2589 |
+
frame #1: <unknown function> + 0x5ba8afe (0x147765e5aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
2590 |
+
frame #2: <unknown function> + 0x5baa358 (0x147765e5c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
2591 |
+
frame #3: <unknown function> + 0x5babb3e (0x147765e5db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
2592 |
+
frame #4: c10d::TCPStore::doWait(c10::ArrayRef<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > >, std::chrono::duration<long, std::ratio<1l, 1000l> >) + 0x1a6 (0x147765e57ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
2593 |
+
frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) + 0x33 (0x147765e57ea3 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
2594 |
+
frame #6: c10d::TCPStore::get(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) + 0xab (0x147765e58f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
2595 |
+
frame #7: <unknown function> + 0xc0f526 (0x14777518b526 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
|
2596 |
+
frame #8: <unknown function> + 0x37f17d (0x1477748fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
|
2597 |
+
<omitting python frames>
|
2598 |
+
frame #17: <unknown function> + 0x94ac3 (0x14777dec7ac3 in /lib/x86_64-linux-gnu/libc.so.6)
|
2599 |
+
frame #18: <unknown function> + 0x126850 (0x14777df59850 in /lib/x86_64-linux-gnu/libc.so.6)
|
2600 |
+
|
2601 |
+
W0621 22:13:48.603000 1127779 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1341] The node 'fs-mbz-gpu-476_1127779_0' has failed to send a keep-alive heartbeat to the rendezvous '343237' due to an error of type RendezvousConnectionError.
|
2602 |
+
[W621 22:13:53.522114287 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-476]:33128, remote=[fs-mbz-gpu-274]:29500): Broken pipe
|
2603 |
+
Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first):
|
2604 |
+
frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >) + 0x98 (0x14777cb785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
|
2605 |
+
frame #1: <unknown function> + 0x5ba8afe (0x147765e5aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
2606 |
+
frame #2: <unknown function> + 0x5baa358 (0x147765e5c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
2607 |
+
frame #3: <unknown function> + 0x5babb3e (0x147765e5db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
2608 |
+
frame #4: c10d::TCPStore::doWait(c10::ArrayRef<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > >, std::chrono::duration<long, std::ratio<1l, 1000l> >) + 0x1a6 (0x147765e57ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
2609 |
+
frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) + 0x33 (0x147765e57ea3 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
2610 |
+
frame #6: c10d::TCPStore::get(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) + 0xab (0x147765e58f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
2611 |
+
frame #7: <unknown function> + 0xc0f526 (0x14777518b526 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
|
2612 |
+
frame #8: <unknown function> + 0x37f17d (0x1477748fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
|
2613 |
+
<omitting python frames>
|
2614 |
+
frame #17: <unknown function> + 0x94ac3 (0x14777dec7ac3 in /lib/x86_64-linux-gnu/libc.so.6)
|
2615 |
+
frame #18: <unknown function> + 0x126850 (0x14777df59850 in /lib/x86_64-linux-gnu/libc.so.6)
|
2616 |
+
|
2617 |
+
W0621 22:13:53.610000 1127779 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1341] The node 'fs-mbz-gpu-476_1127779_0' has failed to send a keep-alive heartbeat to the rendezvous '343237' due to an error of type RendezvousConnectionError.
|
2618 |
+
[W621 22:13:54.978127470 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-476]:33128, remote=[fs-mbz-gpu-274]:29500): Broken pipe
|
2619 |
+
Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first):
|
2620 |
+
frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >) + 0x98 (0x14777cb785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
|
2621 |
+
frame #1: <unknown function> + 0x5ba8afe (0x147765e5aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
2622 |
+
frame #2: <unknown function> + 0x5baa358 (0x147765e5c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
2623 |
+
frame #3: <unknown function> + 0x5babb3e (0x147765e5db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
2624 |
+
frame #4: c10d::TCPStore::doWait(c10::ArrayRef<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > >, std::chrono::duration<long, std::ratio<1l, 1000l> >) + 0x1a6 (0x147765e57ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
2625 |
+
frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) + 0x33 (0x147765e57ea3 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
2626 |
+
frame #6: c10d::TCPStore::get(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) + 0xab (0x147765e58f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
2627 |
+
frame #7: <unknown function> + 0xc0f526 (0x14777518b526 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
|
2628 |
+
frame #8: <unknown function> + 0x37f17d (0x1477748fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
|
2629 |
+
<omitting python frames>
|
2630 |
+
frame #26: <unknown function> + 0x29d90 (0x14777de5cd90 in /lib/x86_64-linux-gnu/libc.so.6)
|
2631 |
+
frame #27: __libc_start_main + 0x80 (0x14777de5ce40 in /lib/x86_64-linux-gnu/libc.so.6)
|
2632 |
+
|
2633 |
+
W0621 22:13:54.073000 1127779 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-476_1127779_0' has failed to shutdown the rendezvous '343237' due to an error of type RendezvousConnectionError.
|
2634 |
+
[W621 22:13:54.992864590 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-476]:33128, remote=[fs-mbz-gpu-274]:29500): Broken pipe
|
2635 |
+
Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first):
|
2636 |
+
frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >) + 0x98 (0x14777cb785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
|
2637 |
+
frame #1: <unknown function> + 0x5ba8afe (0x147765e5aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
2638 |
+
frame #2: <unknown function> + 0x5baa358 (0x147765e5c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
2639 |
+
frame #3: <unknown function> + 0x5babb3e (0x147765e5db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
2640 |
+
frame #4: c10d::TCPStore::doWait(c10::ArrayRef<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > >, std::chrono::duration<long, std::ratio<1l, 1000l> >) + 0x1a6 (0x147765e57ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
2641 |
+
frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) + 0x33 (0x147765e57ea3 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
2642 |
+
frame #6: c10d::TCPStore::get(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) + 0xab (0x147765e58f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
2643 |
+
frame #7: <unknown function> + 0xc0f526 (0x14777518b526 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
|
2644 |
+
frame #8: <unknown function> + 0x37f17d (0x1477748fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
|
2645 |
+
<omitting python frames>
|
2646 |
+
frame #26: <unknown function> + 0x29d90 (0x14777de5cd90 in /lib/x86_64-linux-gnu/libc.so.6)
|
2647 |
+
frame #27: __libc_start_main + 0x80 (0x14777de5ce40 in /lib/x86_64-linux-gnu/libc.so.6)
|
2648 |
+
|
2649 |
+
W0621 22:13:54.084000 1127779 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-476_1127779_0' has failed to shutdown the rendezvous '343237' due to an error of type RendezvousConnectionError.
|
+Traceback (most recent call last):
+File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/elastic/rendezvous/c10d_rendezvous_backend.py", line 117, in _call_store
+return getattr(self._store, store_op)(*args, **kwargs)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+torch.distributed.DistNetworkError: failed to recv, got 0 bytes
+
+The above exception was the direct cause of the following exception:
+
+Traceback (most recent call last):
+File "<frozen runpy>", line 198, in _run_module_as_main
+File "<frozen runpy>", line 88, in _run_code
+File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in <module>
+main()
+File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
+return arg(*args, **kwargs)
+^^^^^^^^^^^^^^^^^^^^
+File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main
+launch(args)
+File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch
+run(args)
+File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run
+elastic_launch(
+File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__
+return launch_agent(self._config, self._entrypoint, list(args))
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 261, in launch_agent
+result = agent.run()
+^^^^^^^^^^^
+File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/elastic/metrics/api.py", line 138, in wrapper
+result = f(*args, **kwargs)
+^^^^^^^^^^^^^^^^^^
+File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/elastic/agent/server/api.py", line 711, in run
+result = self._invoke_run(role)
+^^^^^^^^^^^^^^^^^^^^^^
+File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/elastic/agent/server/api.py", line 906, in _invoke_run
+num_nodes_waiting = rdzv_handler.num_nodes_waiting()
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py", line 1263, in num_nodes_waiting
+self._state_holder.sync()
+File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py", line 437, in sync
+get_response = self._backend.get_state()
+^^^^^^^^^^^^^^^^^^^^^^^^^
+File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/elastic/rendezvous/c10d_rendezvous_backend.py", line 75, in get_state
+base64_state: bytes = self._call_store("get", self._key)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/elastic/rendezvous/c10d_rendezvous_backend.py", line 119, in _call_store
+raise RendezvousConnectionError(
+torch.distributed.elastic.rendezvous.api.RendezvousConnectionError: The connection to the C10d store has failed. See inner exception for details.
++ set +x
++ for ctx_length in 1024 2048 4096 8192 12288 16384 24576 32768 40960 49152 65536 81920 98304 131072
++ export PROF_CTX_LENGTH=98304
++ PROF_CTX_LENGTH=98304
++ name='/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L98304*tp2.cp8.bs1.json'
++ '[' -f '/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L98304*tp2.cp8.bs1.json' ']'
++ echo 'Running ctx_length=98304, TP_SIZE=2, CP_SIZE=8, BATCH_SIZE=1'
++ srun bash ./attnserver.sh
++ which python3
++ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 2 --node_rank 0 --rdzv_id 343237 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-274:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 2 --context-parallel-size 8 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 98304 --max-position-embeddings 98304 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated
+and will be removed in future. Use torchrun.
+Note that --use-env is set by default in torchrun.
+If your script expects `--local-rank` argument to be set, please
+change it to read from `os.environ['LOCAL_RANK']` instead. See
+https://pytorch.org/docs/stable/distributed.html#launch-utility for
+further instructions
+
+main()
+W0621 22:14:13.517000 857484 site-packages/torch/distributed/run.py:766]
+W0621 22:14:13.517000 857484 site-packages/torch/distributed/run.py:766] *****************************************
+W0621 22:14:13.517000 857484 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
+W0621 22:14:13.517000 857484 site-packages/torch/distributed/run.py:766] *****************************************
++ which python3
++ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 2 --node_rank 1 --rdzv_id 343237 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-274:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 2 --context-parallel-size 8 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 98304 --max-position-embeddings 98304 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated
+and will be removed in future. Use torchrun.
+Note that --use-env is set by default in torchrun.
+If your script expects `--local-rank` argument to be set, please
+change it to read from `os.environ['LOCAL_RANK']` instead. See
+https://pytorch.org/docs/stable/distributed.html#launch-utility for
+further instructions
+
+main()
+W0621 22:14:22.394000 1131728 site-packages/torch/distributed/run.py:766]
+W0621 22:14:22.394000 1131728 site-packages/torch/distributed/run.py:766] *****************************************
+W0621 22:14:22.394000 1131728 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
+W0621 22:14:22.394000 1131728 site-packages/torch/distributed/run.py:766] *****************************************
2736 |
+
[rank4]:[W621 22:14:46.315103722 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 4] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
2737 |
+
[rank2]:[W621 22:14:46.315140796 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 2] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
2738 |
+
[rank1]:[W621 22:14:46.315152172 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 1] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
2739 |
+
[rank3]:[W621 22:14:46.320404593 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 3] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
2740 |
+
[rank6]:[W621 22:14:46.320536876 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 6] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
2741 |
+
[rank7]:[W621 22:14:46.320554170 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 7] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
2742 |
+
[rank5]:[W621 22:14:46.320648694 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 5] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
2743 |
+
[rank10]:[W621 22:14:46.987794976 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 10] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
2744 |
+
[rank12]:[W621 22:14:46.987812276 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 12] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
2745 |
+
[rank14]:[W621 22:14:46.987863902 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 14] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
2746 |
+
[rank11]:[W621 22:14:46.987875699 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 11] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
2747 |
+
[rank9]:[W621 22:14:46.987901394 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 9] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
2748 |
+
[rank15]:[W621 22:14:46.987940895 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 15] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
2749 |
+
[rank13]:[W621 22:14:46.988172266 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 13] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
2750 |
+
[rank8]:[W621 22:14:46.217328838 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 8] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
2751 |
+
[rank0]:[W621 22:14:46.564148941 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 0] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
2752 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
2753 |
+
warnings.warn(
|
2754 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
2755 |
+
warnings.warn(
|
2756 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
2757 |
+
warnings.warn(
|
2758 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
2759 |
+
warnings.warn(
|
2760 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
2761 |
+
warnings.warn(
|
2762 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
2763 |
+
warnings.warn(
|
2764 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
2765 |
+
warnings.warn(
|
2766 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
2767 |
+
warnings.warn(
|
2768 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
2769 |
+
warnings.warn(
|
2770 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
2771 |
+
warnings.warn(
|
2772 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
2773 |
+
warnings.warn(
|
2774 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
2775 |
+
warnings.warn(
|
2776 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
2777 |
+
warnings.warn(
|
2778 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
2779 |
+
warnings.warn(
|
2780 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
2781 |
+
warnings.warn(
|
2782 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
2783 |
+
warnings.warn(
|
2784 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
2785 |
+
warnings.warn(
|
2786 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
2787 |
+  warnings.warn(
2788 |
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
2789 |
+  warnings.warn(
2790 |
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
2791 |
+  warnings.warn(
2792 |
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
2793 |
+  warnings.warn(
2794 |
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
2795 |
+  warnings.warn(
2796 |
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
2797 |
+  warnings.warn(
2798 |
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
2799 |
+  warnings.warn(
2800 |
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
2801 |
+  warnings.warn(
2802 |
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
2803 |
+  warnings.warn(
2804 |
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
2805 |
+  warnings.warn(
2806 |
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
2807 |
+  warnings.warn(
2808 |
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
2809 |
+  warnings.warn(
2810 |
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
2811 |
+  warnings.warn(
2812 |
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
2813 |
+  warnings.warn(
2814 |
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
2815 |
+  warnings.warn(
attnserver.run_attnserver.slurm.sh.343237.out.log
CHANGED
The diff for this file is too large to render.
See raw diff
attnserver.run_attnserver.slurm.sh.343238.err.log
CHANGED
@@ -7144,3 +7144,382 @@ W0621 22:04:45.598000 2754239 site-packages/torch/distributed/run.py:766] ******
7144 |
  warnings.warn(
7145 |
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
7146 |
  warnings.warn(
7147 |
+[rank0]: Traceback (most recent call last):
7148 |
+[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
7149 |
+[rank0]: pretrain(
7150 |
+[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 879, in pretrain
7151 |
+[rank0]: save_checkpoint(
7152 |
+[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 469, in save_checkpoint
7153 |
+[rank0]: async_save_request = dist_checkpointing.save(state_dict, checkpoint_name, save_strategy,
7154 |
+[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
7155 |
+[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 386, in save
7156 |
+[rank0]: common_strategy.save_common(state_dict, checkpoint_dir)
7157 |
+[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/common.py", line 48, in save_common
7158 |
+[rank0]: torch.save(common_state_dict, path)
7159 |
+[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/serialization.py", line 964, in save
7160 |
+[rank0]: with _open_zipfile_writer(f) as opened_zipfile:
7161 |
+[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^
7162 |
+[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/serialization.py", line 828, in _open_zipfile_writer
7163 |
+[rank0]: return container(name_or_buffer)
7164 |
+[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^
7165 |
+[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/serialization.py", line 792, in __init__
7166 |
+[rank0]: torch._C.PyTorchFileWriter(
7167 |
+[rank0]: RuntimeError: Parent directory gpt-checkpoint/iter_0000010 does not exist.
7168 |
+
[rank0]:[W621 22:11:30.270394302 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
|
7169 |
+
W0621 22:11:40.160000 3522238 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3522311 closing signal SIGTERM
|
7170 |
+
W0621 22:11:40.163000 3522238 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3522312 closing signal SIGTERM
|
7171 |
+
W0621 22:11:40.166000 3522238 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3522313 closing signal SIGTERM
|
7172 |
+
W0621 22:11:40.169000 3522238 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3522314 closing signal SIGTERM
|
7173 |
+
W0621 22:11:40.184000 3522238 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3522315 closing signal SIGTERM
|
7174 |
+
W0621 22:11:40.197000 3522238 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3522316 closing signal SIGTERM
|
7175 |
+
W0621 22:11:40.200000 3522238 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3522317 closing signal SIGTERM
|
7176 |
+
E0621 22:11:43.303000 3522238 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 0 (pid: 3522310) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
|
7177 |
+
Traceback (most recent call last):
|
7178 |
+
File "<frozen runpy>", line 198, in _run_module_as_main
|
7179 |
+
File "<frozen runpy>", line 88, in _run_code
|
7180 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in <module>
|
7181 |
+
main()
|
7182 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
|
7183 |
+
return arg(*args, **kwargs)
|
7184 |
+
^^^^^^^^^^^^^^^^^^^^
|
7185 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main
|
7186 |
+
launch(args)
|
7187 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch
|
7188 |
+
run(args)
|
7189 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run
|
7190 |
+
elastic_launch(
|
7191 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__
|
7192 |
+
return launch_agent(self._config, self._entrypoint, list(args))
|
7193 |
+
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
7194 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent
|
7195 |
+
raise ChildFailedError(
|
7196 |
+
torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
|
7197 |
+
============================================================
|
7198 |
+
./pretrain_gpt_profile.py FAILED
|
7199 |
+
------------------------------------------------------------
|
7200 |
+
Failures:
|
7201 |
+
<NO_OTHER_FAILURES>
|
7202 |
+
------------------------------------------------------------
|
7203 |
+
Root Cause (first observed failure):
|
7204 |
+
[0]:
|
7205 |
+
time : 2025-06-21_22:11:40
|
7206 |
+
host : fs-mbz-gpu-518
|
7207 |
+
rank : 0 (local_rank: 0)
|
7208 |
+
exitcode : 1 (pid: 3522310)
|
7209 |
+
error_file: <N/A>
|
7210 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
7211 |
+
============================================================
|
7212 |
+
[rank8]:[W621 22:11:43.901393405 TCPStore.cpp:125] [c10d] recvValue failed on SocketImpl(fd=75, addr=[fs-mbz-gpu-546]:48124, remote=[fs-mbz-gpu-518]:38983): failed to recv, got 0 bytes
|
7213 |
+
Exception raised from recvBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:678 (most recent call first):
|
7214 |
+
frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >) + 0x98 (0x14f066f785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
|
7215 |
+
frame #1: <unknown function> + 0x5ba8afe (0x14f05025aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
7216 |
+
frame #2: <unknown function> + 0x5baae40 (0x14f05025ce40 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
7217 |
+
frame #3: <unknown function> + 0x5bab74a (0x14f05025d74a in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
7218 |
+
frame #4: c10d::TCPStore::check(std::vector<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >, std::allocator<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > > > const&) + 0x2a9 (0x14f0502571a9 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
7219 |
+
frame #5: c10d::ProcessGroupNCCL::heartbeatMonitor() + 0x379 (0x14f00d4509a9 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cuda.so)
|
7220 |
+
frame #6: <unknown function> + 0xd3b6d (0x14effd419b6d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/../lib/libstdc++.so.6)
|
7221 |
+
frame #7: <unknown function> + 0x94ac3 (0x14f0682adac3 in /lib/x86_64-linux-gnu/libc.so.6)
|
7222 |
+
frame #8: <unknown function> + 0x126850 (0x14f06833f850 in /lib/x86_64-linux-gnu/libc.so.6)
|
7223 |
+
|
7224 |
+
[rank8]:[W621 22:11:43.906148549 ProcessGroupNCCL.cpp:1659] [PG ID 0 PG GUID 0(default_pg) Rank 8] Failed to check the "should dump" flag on TCPStore, (maybe TCPStore server has shut down too early), with error: failed to recv, got 0 bytes
|
7225 |
+
[rank14]:[W621 22:11:43.956280987 TCPStore.cpp:125] [c10d] recvValue failed on SocketImpl(fd=95, addr=[fs-mbz-gpu-546]:48182, remote=[fs-mbz-gpu-518]:38983): failed to recv, got 0 bytes
|
7226 |
+
Exception raised from recvBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:678 (most recent call first):
|
7227 |
+
frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >) + 0x98 (0x15499d5785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
|
7228 |
+
frame #1: <unknown function> + 0x5ba8afe (0x15498645aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
7229 |
+
frame #2: <unknown function> + 0x5baae40 (0x15498645ce40 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
7230 |
+
frame #3: <unknown function> + 0x5bab74a (0x15498645d74a in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
7231 |
+
frame #4: c10d::TCPStore::check(std::vector<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >, std::allocator<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > > > const&) + 0x2a9 (0x1549864571a9 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
7232 |
+
frame #5: c10d::ProcessGroupNCCL::heartbeatMonitor() + 0x379 (0x1549436509a9 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cuda.so)
|
7233 |
+
frame #6: <unknown function> + 0xd3b6d (0x15499d0f1b6d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/../lib/libstdc++.so.6)
|
7234 |
+
frame #7: <unknown function> + 0x94ac3 (0x15499e654ac3 in /lib/x86_64-linux-gnu/libc.so.6)
|
7235 |
+
frame #8: <unknown function> + 0x126850 (0x15499e6e6850 in /lib/x86_64-linux-gnu/libc.so.6)
|
7236 |
+
|
7237 |
+
[rank14]:[W621 22:11:43.960548931 ProcessGroupNCCL.cpp:1659] [PG ID 0 PG GUID 0(default_pg) Rank 14] Failed to check the "should dump" flag on TCPStore, (maybe TCPStore server has shut down too early), with error: failed to recv, got 0 bytes
|
7238 |
+
[rank12]:[W621 22:11:43.000331233 TCPStore.cpp:125] [c10d] recvValue failed on SocketImpl(fd=95, addr=[fs-mbz-gpu-546]:48158, remote=[fs-mbz-gpu-518]:38983): failed to recv, got 0 bytes
|
7239 |
+
Exception raised from recvBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:678 (most recent call first):
|
7240 |
+
frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >) + 0x98 (0x14ac5cf785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
|
7241 |
+
frame #1: <unknown function> + 0x5ba8afe (0x14ac45e5aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
7242 |
+
frame #2: <unknown function> + 0x5baae40 (0x14ac45e5ce40 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
7243 |
+
frame #3: <unknown function> + 0x5bab74a (0x14ac45e5d74a in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
7244 |
+
frame #4: c10d::TCPStore::check(std::vector<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >, std::allocator<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > > > const&) + 0x2a9 (0x14ac45e571a9 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
7245 |
+
frame #5: c10d::ProcessGroupNCCL::heartbeatMonitor() + 0x379 (0x14ac030509a9 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cuda.so)
|
7246 |
+
frame #6: <unknown function> + 0xd3b6d (0x14ac5caf1b6d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/../lib/libstdc++.so.6)
|
7247 |
+
frame #7: <unknown function> + 0x94ac3 (0x14ac5e05fac3 in /lib/x86_64-linux-gnu/libc.so.6)
|
7248 |
+
frame #8: <unknown function> + 0x126850 (0x14ac5e0f1850 in /lib/x86_64-linux-gnu/libc.so.6)
|
7249 |
+
|
7250 |
+
[rank12]:[W621 22:11:43.004434512 ProcessGroupNCCL.cpp:1659] [PG ID 0 PG GUID 0(default_pg) Rank 12] Failed to check the "should dump" flag on TCPStore, (maybe TCPStore server has shut down too early), with error: failed to recv, got 0 bytes
|
7251 |
+
[rank15]:[W621 22:11:43.012364780 TCPStore.cpp:125] [c10d] recvValue failed on SocketImpl(fd=95, addr=[fs-mbz-gpu-546]:48140, remote=[fs-mbz-gpu-518]:38983): failed to recv, got 0 bytes
|
7252 |
+
Exception raised from recvBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:678 (most recent call first):
|
7253 |
+
frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >) + 0x98 (0x14aed4b785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
|
7254 |
+
frame #1: <unknown function> + 0x5ba8afe (0x14aebda5aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
7255 |
+
frame #2: <unknown function> + 0x5baae40 (0x14aebda5ce40 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
7256 |
+
frame #3: <unknown function> + 0x5bab74a (0x14aebda5d74a in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
7257 |
+
frame #4: c10d::TCPStore::check(std::vector<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >, std::allocator<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > > > const&) + 0x2a9 (0x14aebda571a9 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
7258 |
+
frame #5: c10d::ProcessGroupNCCL::heartbeatMonitor() + 0x379 (0x14ae7ac509a9 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cuda.so)
|
7259 |
+
frame #6: <unknown function> + 0xd3b6d (0x14aed46f1b6d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/../lib/libstdc++.so.6)
|
7260 |
+
frame #7: <unknown function> + 0x94ac3 (0x14aed5bffac3 in /lib/x86_64-linux-gnu/libc.so.6)
|
7261 |
+
frame #8: <unknown function> + 0x126850 (0x14aed5c91850 in /lib/x86_64-linux-gnu/libc.so.6)
|
7262 |
+
|
7263 |
+
[rank15]:[W621 22:11:43.016269886 ProcessGroupNCCL.cpp:1659] [PG ID 0 PG GUID 0(default_pg) Rank 15] Failed to check the "should dump" flag on TCPStore, (maybe TCPStore server has shut down too early), with error: failed to recv, got 0 bytes
|
7264 |
+
[rank11]:[W621 22:11:43.012363687 TCPStore.cpp:125] [c10d] recvValue failed on SocketImpl(fd=95, addr=[fs-mbz-gpu-546]:48178, remote=[fs-mbz-gpu-518]:38983): failed to recv, got 0 bytes
|
7265 |
+
Exception raised from recvBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:678 (most recent call first):
|
7266 |
+
frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >) + 0x98 (0x14651c7785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
|
7267 |
+
frame #1: <unknown function> + 0x5ba8afe (0x14650565aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
7268 |
+
frame #2: <unknown function> + 0x5baae40 (0x14650565ce40 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
7269 |
+
frame #3: <unknown function> + 0x5bab74a (0x14650565d74a in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
7270 |
+
frame #4: c10d::TCPStore::check(std::vector<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >, std::allocator<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > > > const&) + 0x2a9 (0x1465056571a9 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
7271 |
+
frame #5: c10d::ProcessGroupNCCL::heartbeatMonitor() + 0x379 (0x1464c28509a9 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cuda.so)
|
7272 |
+
frame #6: <unknown function> + 0xd3b6d (0x14651c2f1b6d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/../lib/libstdc++.so.6)
|
7273 |
+
frame #7: <unknown function> + 0x94ac3 (0x14651d825ac3 in /lib/x86_64-linux-gnu/libc.so.6)
|
7274 |
+
frame #8: <unknown function> + 0x126850 (0x14651d8b7850 in /lib/x86_64-linux-gnu/libc.so.6)
|
7275 |
+
|
7276 |
+
[rank11]:[W621 22:11:43.016683831 ProcessGroupNCCL.cpp:1659] [PG ID 0 PG GUID 0(default_pg) Rank 11] Failed to check the "should dump" flag on TCPStore, (maybe TCPStore server has shut down too early), with error: failed to recv, got 0 bytes
|
7277 |
+
[rank13]:[W621 22:11:43.012478440 TCPStore.cpp:125] [c10d] recvValue failed on SocketImpl(fd=95, addr=[fs-mbz-gpu-546]:48138, remote=[fs-mbz-gpu-518]:38983): failed to recv, got 0 bytes
|
7278 |
+
Exception raised from recvBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:678 (most recent call first):
|
7279 |
+
frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >) + 0x98 (0x14b98bb785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
|
7280 |
+
frame #1: <unknown function> + 0x5ba8afe (0x14b974a5aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
7281 |
+
frame #2: <unknown function> + 0x5baae40 (0x14b974a5ce40 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
7282 |
+
frame #3: <unknown function> + 0x5bab74a (0x14b974a5d74a in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
7283 |
+
frame #4: c10d::TCPStore::check(std::vector<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >, std::allocator<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > > > const&) + 0x2a9 (0x14b974a571a9 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
7284 |
+
frame #5: c10d::ProcessGroupNCCL::heartbeatMonitor() + 0x379 (0x14b931c509a9 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cuda.so)
|
7285 |
+
frame #6: <unknown function> + 0xd3b6d (0x14b98b6f1b6d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/../lib/libstdc++.so.6)
|
7286 |
+
frame #7: <unknown function> + 0x94ac3 (0x14b98cc31ac3 in /lib/x86_64-linux-gnu/libc.so.6)
|
7287 |
+
frame #8: <unknown function> + 0x126850 (0x14b98ccc3850 in /lib/x86_64-linux-gnu/libc.so.6)
|
7288 |
+
|
7289 |
+
[rank9]:[W621 22:11:43.012604382 TCPStore.cpp:125] [c10d] recvValue failed on SocketImpl(fd=95, addr=[fs-mbz-gpu-546]:48170, remote=[fs-mbz-gpu-518]:38983): failed to recv, got 0 bytes
|
7290 |
+
Exception raised from recvBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:678 (most recent call first):
|
7291 |
+
frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >) + 0x98 (0x1472ba1785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
|
7292 |
+
frame #1: <unknown function> + 0x5ba8afe (0x1472a305aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
7293 |
+
frame #2: <unknown function> + 0x5baae40 (0x1472a305ce40 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
7294 |
+
frame #3: <unknown function> + 0x5bab74a (0x1472a305d74a in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
7295 |
+
frame #4: c10d::TCPStore::check(std::vector<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >, std::allocator<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > > > const&) + 0x2a9 (0x1472a30571a9 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
7296 |
+
frame #5: c10d::ProcessGroupNCCL::heartbeatMonitor() + 0x379 (0x1472602509a9 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cuda.so)
|
7297 |
+
frame #6: <unknown function> + 0xd3b6d (0x1472b9cf1b6d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/../lib/libstdc++.so.6)
|
7298 |
+
frame #7: <unknown function> + 0x94ac3 (0x1472bb1bbac3 in /lib/x86_64-linux-gnu/libc.so.6)
|
7299 |
+
frame #8: <unknown function> + 0x126850 (0x1472bb24d850 in /lib/x86_64-linux-gnu/libc.so.6)
|
7300 |
+
|
7301 |
+
[rank13]:[W621 22:11:43.016790533 ProcessGroupNCCL.cpp:1659] [PG ID 0 PG GUID 0(default_pg) Rank 13] Failed to check the "should dump" flag on TCPStore, (maybe TCPStore server has shut down too early), with error: failed to recv, got 0 bytes
|
7302 |
+
[rank9]:[W621 22:11:43.016820469 ProcessGroupNCCL.cpp:1659] [PG ID 0 PG GUID 0(default_pg) Rank 9] Failed to check the "should dump" flag on TCPStore, (maybe TCPStore server has shut down too early), with error: failed to recv, got 0 bytes
|
7303 |
+
+ set +x
|
7304 |
+
W0621 22:11:44.242000 2754239 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2754308 closing signal SIGTERM
|
7305 |
+
W0621 22:11:44.247000 2754239 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2754309 closing signal SIGTERM
|
7306 |
+
W0621 22:11:44.250000 2754239 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2754310 closing signal SIGTERM
|
7307 |
+
W0621 22:11:44.253000 2754239 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2754311 closing signal SIGTERM
|
7308 |
+
W0621 22:11:44.255000 2754239 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2754312 closing signal SIGTERM
|
7309 |
+
W0621 22:11:44.268000 2754239 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2754313 closing signal SIGTERM
|
7310 |
+
W0621 22:11:44.276000 2754239 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2754314 closing signal SIGTERM
|
7311 |
+
W0621 22:11:44.293000 2754239 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2754315 closing signal SIGTERM
|
7312 |
+
[W621 22:11:46.672843694 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-546]:44530, remote=[fs-mbz-gpu-518]:29500): Broken pipe
|
7313 |
+
Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first):
|
7314 |
+
frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >) + 0x98 (0x148c7e9785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
|
7315 |
+
frame #1: <unknown function> + 0x5ba8afe (0x148c6785aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
7316 |
+
frame #2: <unknown function> + 0x5baa358 (0x148c6785c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
7317 |
+
frame #3: <unknown function> + 0x5babb3e (0x148c6785db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
7318 |
+
frame #4: c10d::TCPStore::doWait(c10::ArrayRef<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > >, std::chrono::duration<long, std::ratio<1l, 1000l> >) + 0x1a6 (0x148c67857ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
7319 |
+
frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) + 0x33 (0x148c67857ea3 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
7320 |
+
frame #6: c10d::TCPStore::get(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) + 0xab (0x148c67858f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
7321 |
+
frame #7: <unknown function> + 0xc0f526 (0x148c76b8b526 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
|
7322 |
+
frame #8: <unknown function> + 0x37f17d (0x148c762fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
|
7323 |
+
<omitting python frames>
|
7324 |
+
frame #17: <unknown function> + 0x94ac3 (0x148c7f9efac3 in /lib/x86_64-linux-gnu/libc.so.6)
|
7325 |
+
frame #18: <unknown function> + 0x126850 (0x148c7fa81850 in /lib/x86_64-linux-gnu/libc.so.6)
|
7326 |
+
|
7327 |
+
W0621 22:11:46.204000 2754239 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1341] The node 'fs-mbz-gpu-546_2754239_0' has failed to send a keep-alive heartbeat to the rendezvous '343238' due to an error of type RendezvousConnectionError.
|
7328 |
+
[W621 22:11:47.657307998 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-546]:44530, remote=[fs-mbz-gpu-518]:29500): Broken pipe
|
7329 |
+
Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first):
|
7330 |
+
frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >) + 0x98 (0x148c7e9785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
|
7331 |
+
frame #1: <unknown function> + 0x5ba8afe (0x148c6785aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
7332 |
+
frame #2: <unknown function> + 0x5baa358 (0x148c6785c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
7333 |
+
frame #3: <unknown function> + 0x5babb3e (0x148c6785db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
7334 |
+
frame #4: c10d::TCPStore::doWait(c10::ArrayRef<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > >, std::chrono::duration<long, std::ratio<1l, 1000l> >) + 0x1a6 (0x148c67857ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
7335 |
+
frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) + 0x33 (0x148c67857ea3 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
7336 |
+
frame #6: c10d::TCPStore::get(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) + 0xab (0x148c67858f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
7337 |
+
frame #7: <unknown function> + 0xc0f526 (0x148c76b8b526 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
|
7338 |
+
frame #8: <unknown function> + 0x37f17d (0x148c762fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
|
7339 |
+
<omitting python frames>
|
7340 |
+
frame #26: <unknown function> + 0x29d90 (0x148c7f984d90 in /lib/x86_64-linux-gnu/libc.so.6)
|
7341 |
+
frame #27: __libc_start_main + 0x80 (0x148c7f984e40 in /lib/x86_64-linux-gnu/libc.so.6)
|
7342 |
+
|
7343 |
+
W0621 22:11:47.194000 2754239 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-546_2754239_0' has failed to shutdown the rendezvous '343238' due to an error of type RendezvousConnectionError.
|
7344 |
+
[W621 22:11:47.672909655 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-546]:44530, remote=[fs-mbz-gpu-518]:29500): Broken pipe
|
7345 |
+
Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first):
|
7346 |
+
frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >) + 0x98 (0x148c7e9785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
|
7347 |
+
frame #1: <unknown function> + 0x5ba8afe (0x148c6785aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
7348 |
+
frame #2: <unknown function> + 0x5baa358 (0x148c6785c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
7349 |
+
frame #3: <unknown function> + 0x5babb3e (0x148c6785db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
7350 |
+
frame #4: c10d::TCPStore::doWait(c10::ArrayRef<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > >, std::chrono::duration<long, std::ratio<1l, 1000l> >) + 0x1a6 (0x148c67857ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
7351 |
+
frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) + 0x33 (0x148c67857ea3 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
7352 |
+
frame #6: c10d::TCPStore::get(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) + 0xab (0x148c67858f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
7353 |
+
frame #7: <unknown function> + 0xc0f526 (0x148c76b8b526 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
|
7354 |
+
frame #8: <unknown function> + 0x37f17d (0x148c762fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
|
7355 |
+
<omitting python frames>
|
7356 |
+
frame #26: <unknown function> + 0x29d90 (0x148c7f984d90 in /lib/x86_64-linux-gnu/libc.so.6)
|
7357 |
+
frame #27: __libc_start_main + 0x80 (0x148c7f984e40 in /lib/x86_64-linux-gnu/libc.so.6)
|
7358 |
+
|
7359 |
+
W0621 22:11:47.206000 2754239 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-546_2754239_0' has failed to shutdown the rendezvous '343238' due to an error of type RendezvousConnectionError.
|
7360 |
+
Traceback (most recent call last):
|
7361 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/elastic/rendezvous/c10d_rendezvous_backend.py", line 117, in _call_store
|
7362 |
+
return getattr(self._store, store_op)(*args, **kwargs)
|
7363 |
+
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
7364 |
+
torch.distributed.DistNetworkError: failed to recv, got 0 bytes
|
7365 |
+
|
7366 |
+
The above exception was the direct cause of the following exception:
|
7367 |
+
|
7368 |
+
Traceback (most recent call last):
|
7369 |
+
File "<frozen runpy>", line 198, in _run_module_as_main
|
7370 |
+
File "<frozen runpy>", line 88, in _run_code
|
7371 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in <module>
|
7372 |
+
main()
|
7373 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
|
7374 |
+
return arg(*args, **kwargs)
|
7375 |
+
^^^^^^^^^^^^^^^^^^^^
|
7376 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main
|
7377 |
+
launch(args)
|
7378 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch
|
7379 |
+
run(args)
|
7380 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run
|
7381 |
+
elastic_launch(
|
7382 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__
|
7383 |
+
return launch_agent(self._config, self._entrypoint, list(args))
|
7384 |
+
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
7385 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 261, in launch_agent
|
7386 |
+
result = agent.run()
|
7387 |
+
^^^^^^^^^^^
|
7388 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/elastic/metrics/api.py", line 138, in wrapper
|
7389 |
+
result = f(*args, **kwargs)
|
7390 |
+
^^^^^^^^^^^^^^^^^^
|
7391 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/elastic/agent/server/api.py", line 711, in run
|
7392 |
+
result = self._invoke_run(role)
|
7393 |
+
^^^^^^^^^^^^^^^^^^^^^^
|
7394 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/elastic/agent/server/api.py", line 906, in _invoke_run
|
7395 |
+
num_nodes_waiting = rdzv_handler.num_nodes_waiting()
|
7396 |
+
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
7397 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py", line 1263, in num_nodes_waiting
|
7398 |
+
self._state_holder.sync()
|
7399 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py", line 437, in sync
|
7400 |
+
get_response = self._backend.get_state()
|
7401 |
+
^^^^^^^^^^^^^^^^^^^^^^^^^
|
7402 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/elastic/rendezvous/c10d_rendezvous_backend.py", line 75, in get_state
|
7403 |
+
base64_state: bytes = self._call_store("get", self._key)
|
7404 |
+
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
7405 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/elastic/rendezvous/c10d_rendezvous_backend.py", line 119, in _call_store
|
7406 |
+
raise RendezvousConnectionError(
|
7407 |
+
torch.distributed.elastic.rendezvous.api.RendezvousConnectionError: The connection to the C10d store has failed. See inner exception for details.
|
7408 |
+
+ set +x
|
7409 |
+
+ for ctx_length in 1024 2048 4096 8192 12288 16384 24576 32768 40960 49152 65536 81920 98304 131072
|
7410 |
+
+ export PROF_CTX_LENGTH=81920
|
7411 |
+
+ PROF_CTX_LENGTH=81920
|
7412 |
+
+ name='/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L81920*tp2.cp8.bs2.json'
|
7413 |
+
+ '[' -f '/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L81920*tp2.cp8.bs2.json' ']'
|
7414 |
+
+ echo 'Running ctx_length=81920, TP_SIZE=2, CP_SIZE=8, BATCH_SIZE=2'
|
7415 |
+
+ srun bash ./attnserver.sh
|
7416 |
+
+ which python3
|
7417 |
+
+ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 2 --node_rank 0 --rdzv_id 343238 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-518:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 2 --context-parallel-size 8 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 81920 --max-position-embeddings 81920 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
|
7418 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated
|
7419 |
+
and will be removed in future. Use torchrun.
|
7420 |
+
Note that --use-env is set by default in torchrun.
|
7421 |
+
If your script expects `--local-rank` argument to be set, please
|
7422 |
+
change it to read from `os.environ['LOCAL_RANK']` instead. See
|
7423 |
+
https://pytorch.org/docs/stable/distributed.html#launch-utility for
|
7424 |
+
further instructions
|
7425 |
+
|
7426 |
+
main()
|
7427 |
+
W0621 22:11:58.313000 3525952 site-packages/torch/distributed/run.py:766]
|
7428 |
+
W0621 22:11:58.313000 3525952 site-packages/torch/distributed/run.py:766] *****************************************
|
7429 |
+
W0621 22:11:58.313000 3525952 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
|
7430 |
+
W0621 22:11:58.313000 3525952 site-packages/torch/distributed/run.py:766] *****************************************
|
7431 |
+
+ which python3
|
7432 |
+
+ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 2 --node_rank 1 --rdzv_id 343238 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-518:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 2 --context-parallel-size 8 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 81920 --max-position-embeddings 81920 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
|
7433 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated
|
7434 |
+
and will be removed in future. Use torchrun.
|
7435 |
+
Note that --use-env is set by default in torchrun.
|
7436 |
+
If your script expects `--local-rank` argument to be set, please
|
7437 |
+
change it to read from `os.environ['LOCAL_RANK']` instead. See
|
7438 |
+
https://pytorch.org/docs/stable/distributed.html#launch-utility for
|
7439 |
+
further instructions
|
7440 |
+
|
7441 |
+
main()
|
7442 |
+
W0621 22:12:03.366000 2757876 site-packages/torch/distributed/run.py:766]
|
7443 |
+
W0621 22:12:03.366000 2757876 site-packages/torch/distributed/run.py:766] *****************************************
|
7444 |
+
W0621 22:12:03.366000 2757876 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
|
7445 |
+
W0621 22:12:03.366000 2757876 site-packages/torch/distributed/run.py:766] *****************************************
|
7446 |
+
[rank15]:[W621 22:12:26.529643661 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 15] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
7447 |
+
[rank1]:[W621 22:12:26.887649999 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 1] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
7448 |
+
[rank4]:[W621 22:12:26.887649333 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 4] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
7449 |
+
[rank6]:[W621 22:12:26.887678852 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 6] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
7450 |
+
[rank5]:[W621 22:12:26.887712594 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 5] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
7451 |
+
[rank2]:[W621 22:12:26.887764574 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 2] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
7452 |
+
[rank7]:[W621 22:12:26.887817820 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 7] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
7453 |
+
[rank3]:[W621 22:12:26.887817826 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 3] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
7454 |
+
[rank9]:[W621 22:12:26.540115439 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 9] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
7455 |
+
[rank11]:[W621 22:12:26.540201507 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 11] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
7456 |
+
[rank13]:[W621 22:12:26.543492971 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 13] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
7457 |
+
[rank14]:[W621 22:12:26.543613825 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 14] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
7458 |
+
[rank10]:[W621 22:12:26.543614372 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 10] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
7459 |
+
[rank12]:[W621 22:12:26.543940035 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 12] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
7460 |
+
[rank0]:[W621 22:12:26.124198092 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 0] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
7461 |
+
[rank8]:[W621 22:12:26.796569599 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 8] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
7462 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
7463 |
+
warnings.warn(
|
7464 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
7465 |
+
warnings.warn(
|
7466 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
7467 |
+
warnings.warn(
|
7468 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
7469 |
+
warnings.warn(
|
7470 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
7471 |
+
warnings.warn(
|
7472 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
7473 |
+
warnings.warn(
|
7474 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
7475 |
+
warnings.warn(
|
7476 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
7477 |
+
warnings.warn(
|
7478 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
7479 |
+
warnings.warn(
|
7480 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
7481 |
+
warnings.warn(
|
7482 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
7483 |
+
warnings.warn(
|
7484 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
7485 | + warnings.warn(
7486 | + /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
7487 | + warnings.warn(
7488 | + /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
7489 | + warnings.warn(
7490 | + /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
7491 | + warnings.warn(
7492 | + /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
7493 | + warnings.warn(
7494 | + /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
7495 | + warnings.warn(
7496 | + /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
7497 | + warnings.warn(
7498 | + /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
7499 | + warnings.warn(
7500 | + /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
7501 | + warnings.warn(
7502 | + /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
7503 | + warnings.warn(
7504 | + /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
7505 | + warnings.warn(
7506 | + /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
7507 | + warnings.warn(
7508 | + /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
7509 | + warnings.warn(
7510 | + /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
7511 | + warnings.warn(
7512 | + /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
7513 | + warnings.warn(
7514 | + /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
7515 | + warnings.warn(
7516 | + /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
7517 | + warnings.warn(
7518 | + /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
7519 | + warnings.warn(
7520 | + /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
7521 | + warnings.warn(
7522 | + /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
7523 | + warnings.warn(
7524 | + /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
7525 | + warnings.warn(
attnserver.run_attnserver.slurm.sh.343238.out.log
CHANGED
The diff for this file is too large to render.
See raw diff
|
|
attnserver.run_attnserver.slurm.sh.343239.err.log
CHANGED
The diff for this file is too large to render.
See raw diff
|
|
attnserver.run_attnserver.slurm.sh.343239.out.log
CHANGED
The diff for this file is too large to render.
See raw diff
|
|
attnserver.run_attnserver.slurm.sh.343240.err.log
CHANGED
The diff for this file is too large to render.
See raw diff
|
|
attnserver.run_attnserver.slurm.sh.343240.out.log
CHANGED
The diff for this file is too large to render.
See raw diff
|
|
attnserver.run_attnserver.slurm.sh.343241.err.log
ADDED
The diff for this file is too large to render.
See raw diff
|
|
attnserver.run_attnserver.slurm.sh.343241.out.log
ADDED
The diff for this file is too large to render.
See raw diff
|
|
attnserver.run_attnserver.slurm.sh.343242.err.log
ADDED
@@ -0,0 +1,156 @@
1 | + + source /mnt/weka/home/hao.zhang/conda/miniconda/bin/activate
2 | + ++ _CONDA_ROOT=/mnt/weka/home/hao.zhang/conda/miniconda
3 | + ++ . /mnt/weka/home/hao.zhang/conda/miniconda/etc/profile.d/conda.sh
4 | + +++ export CONDA_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/conda
5 | + +++ CONDA_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/conda
6 | + +++ export _CE_M=
7 | + +++ _CE_M=
8 | + +++ export _CE_CONDA=
9 | + +++ _CE_CONDA=
10 | + +++ export CONDA_PYTHON_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/python
11 | + +++ CONDA_PYTHON_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/python
12 | + +++ '[' -z x ']'
13 | + ++ conda activate
14 | + ++ local cmd=activate
15 | + ++ case "$cmd" in
16 | + ++ __conda_activate activate
17 | + ++ '[' -n '' ']'
18 | + ++ local ask_conda
19 | + +++ PS1=
20 | + +++ __conda_exe shell.posix activate
21 | + +++ '[' -n '' ']'
22 | + +++ /mnt/weka/home/hao.zhang/conda/miniconda/bin/conda shell.posix activate
23 | + ++ ask_conda='unset _CE_M
24 | + unset _CE_CONDA
25 | + PS1='\''(base) '\''
26 | + export PATH='\''/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/conda/miniconda/bin:/mnt/weka/home/hao.zhang/conda/miniconda/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin'\''
27 | + export CONDA_SHLVL='\''1'\''
28 | + export CONDA_PROMPT_MODIFIER='\''(base) '\''
29 | + export CONDA_EXE='\''/mnt/weka/home/hao.zhang/conda/miniconda/bin/conda'\''
30 | + export CONDA_PYTHON_EXE='\''/mnt/weka/home/hao.zhang/conda/miniconda/bin/python'\'''
31 | + ++ eval 'unset _CE_M
32 | + unset _CE_CONDA
33 | + PS1='\''(base) '\''
34 | + export PATH='\''/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/conda/miniconda/bin:/mnt/weka/home/hao.zhang/conda/miniconda/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin'\''
35 | + export CONDA_SHLVL='\''1'\''
36 | + export CONDA_PROMPT_MODIFIER='\''(base) '\''
37 | + export CONDA_EXE='\''/mnt/weka/home/hao.zhang/conda/miniconda/bin/conda'\''
38 | + export CONDA_PYTHON_EXE='\''/mnt/weka/home/hao.zhang/conda/miniconda/bin/python'\'''
39 | + +++ unset _CE_M
40 | + +++ unset _CE_CONDA
41 | + +++ PS1='(base) '
42 | + +++ export PATH=/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/conda/miniconda/bin:/mnt/weka/home/hao.zhang/conda/miniconda/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin
43 | + +++ PATH=/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/conda/miniconda/bin:/mnt/weka/home/hao.zhang/conda/miniconda/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin
44 | + +++ export CONDA_SHLVL=1
45 | + +++ CONDA_SHLVL=1
46 | + +++ export 'CONDA_PROMPT_MODIFIER=(base) '
47 | + +++ CONDA_PROMPT_MODIFIER='(base) '
48 | + +++ export CONDA_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/conda
49 | + +++ CONDA_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/conda
50 | + +++ export CONDA_PYTHON_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/python
51 | + +++ CONDA_PYTHON_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/python
52 | + ++ __conda_hashr
53 | + ++ '[' -n '' ']'
54 | + ++ '[' -n '' ']'
55 | + ++ hash -r
56 | + + conda activate junda-attnserver
57 | + + local cmd=activate
58 | + + case "$cmd" in
59 | + + __conda_activate activate junda-attnserver
60 | + + '[' -n '' ']'
61 | + + local ask_conda
62 | + ++ PS1='(base) '
63 | + ++ __conda_exe shell.posix activate junda-attnserver
64 | + ++ '[' -n '' ']'
65 | + ++ /mnt/weka/home/hao.zhang/conda/miniconda/bin/conda shell.posix activate junda-attnserver
66 | + + ask_conda='unset _CE_M
67 | + unset _CE_CONDA
68 | + PS1='\''(junda-attnserver) '\''
69 | + export PATH='\''/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin:/mnt/weka/home/hao.zhang/conda/miniconda/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin'\''
70 | + export CONDA_PREFIX='\''/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver'\''
71 | + export CONDA_SHLVL='\''2'\''
72 | + export CONDA_DEFAULT_ENV='\''junda-attnserver'\''
73 | + export CONDA_PROMPT_MODIFIER='\''(junda-attnserver) '\''
74 | + export CONDA_PREFIX_1='\''/mnt/weka/home/hao.zhang/conda/miniconda'\''
75 | + export CONDA_EXE='\''/mnt/weka/home/hao.zhang/conda/miniconda/bin/conda'\''
76 | + export CONDA_PYTHON_EXE='\''/mnt/weka/home/hao.zhang/conda/miniconda/bin/python'\'''
77 | + + eval 'unset _CE_M
78 | + unset _CE_CONDA
79 | + PS1='\''(junda-attnserver) '\''
80 | + export PATH='\''/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin:/mnt/weka/home/hao.zhang/conda/miniconda/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin'\''
81 | + export CONDA_PREFIX='\''/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver'\''
82 | + export CONDA_SHLVL='\''2'\''
83 | + export CONDA_DEFAULT_ENV='\''junda-attnserver'\''
84 | + export CONDA_PROMPT_MODIFIER='\''(junda-attnserver) '\''
85 | + export CONDA_PREFIX_1='\''/mnt/weka/home/hao.zhang/conda/miniconda'\''
86 | + export CONDA_EXE='\''/mnt/weka/home/hao.zhang/conda/miniconda/bin/conda'\''
87 | + export CONDA_PYTHON_EXE='\''/mnt/weka/home/hao.zhang/conda/miniconda/bin/python'\'''
88 | + ++ unset _CE_M
89 | + ++ unset _CE_CONDA
90 | + ++ PS1='(junda-attnserver) '
91 | + ++ export PATH=/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin:/mnt/weka/home/hao.zhang/conda/miniconda/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin
92 | + ++ PATH=/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin:/mnt/weka/home/hao.zhang/conda/miniconda/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin
93 | + ++ export CONDA_PREFIX=/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver
94 | + ++ CONDA_PREFIX=/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver
95 | + ++ export CONDA_SHLVL=2
96 | + ++ CONDA_SHLVL=2
97 | + ++ export CONDA_DEFAULT_ENV=junda-attnserver
98 | + ++ CONDA_DEFAULT_ENV=junda-attnserver
99 | + ++ export 'CONDA_PROMPT_MODIFIER=(junda-attnserver) '
100 | + ++ CONDA_PROMPT_MODIFIER='(junda-attnserver) '
101 | + ++ export CONDA_PREFIX_1=/mnt/weka/home/hao.zhang/conda/miniconda
102 | + ++ CONDA_PREFIX_1=/mnt/weka/home/hao.zhang/conda/miniconda
103 | + ++ export CONDA_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/conda
104 | + ++ CONDA_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/conda
105 | + ++ export CONDA_PYTHON_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/python
106 | + ++ CONDA_PYTHON_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/python
107 | + + __conda_hashr
108 | + + '[' -n '' ']'
109 | + + '[' -n '' ']'
110 | + + hash -r
111 | + + export CHROME_TRACE_PREFIX=/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5
112 | + + CHROME_TRACE_PREFIX=/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5
113 | + + mkdir -p /mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5
114 | + + export PROF_TP_SIZE=2
115 | + + PROF_TP_SIZE=2
116 | + + export PROF_CP_SIZE=8
117 | + + PROF_CP_SIZE=8
118 | + + export PROF_BS=32
119 | + + PROF_BS=32
120 | + + for ctx_length in 1024 2048 4096 8192 12288 16384 24576 32768 40960 49152 65536 81920 98304 131072
121 | + + export PROF_CTX_LENGTH=1024
122 | + + PROF_CTX_LENGTH=1024
123 | + + name='/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L1024*tp2.cp8.bs32.json'
124 | + + '[' -f '/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L1024*tp2.cp8.bs32.json' ']'
125 | + + echo 'Running ctx_length=1024, TP_SIZE=2, CP_SIZE=8, BATCH_SIZE=32'
126 | + + srun bash ./attnserver.sh
127 | + + which python3
128 | + + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 2 --node_rank 0 --rdzv_id 343242 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-188:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 2 --context-parallel-size 8 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 1024 --max-position-embeddings 1024 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
129 | + + which python3
130 | + + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 2 --node_rank 1 --rdzv_id 343242 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-188:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 2 --context-parallel-size 8 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 1024 --max-position-embeddings 1024 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
131 | + /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated
132 | + and will be removed in future. Use torchrun.
133 | + Note that --use-env is set by default in torchrun.
134 | + If your script expects `--local-rank` argument to be set, please
135 | + change it to read from `os.environ['LOCAL_RANK']` instead. See
136 | + https://pytorch.org/docs/stable/distributed.html#launch-utility for
137 | + further instructions
138 | +
139 | + main()
140 | + W0621 22:19:18.745000 822159 site-packages/torch/distributed/run.py:766]
141 | + W0621 22:19:18.745000 822159 site-packages/torch/distributed/run.py:766] *****************************************
142 | + W0621 22:19:18.745000 822159 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
143 | + W0621 22:19:18.745000 822159 site-packages/torch/distributed/run.py:766] *****************************************
144 | + /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated
145 | + and will be removed in future. Use torchrun.
146 | + Note that --use-env is set by default in torchrun.
147 | + If your script expects `--local-rank` argument to be set, please
148 | + change it to read from `os.environ['LOCAL_RANK']` instead. See
149 | + https://pytorch.org/docs/stable/distributed.html#launch-utility for
150 | + further instructions
151 | +
152 | + main()
153 | + W0621 22:19:18.788000 2168212 site-packages/torch/distributed/run.py:766]
154 | + W0621 22:19:18.788000 2168212 site-packages/torch/distributed/run.py:766] *****************************************
155 | + W0621 22:19:18.788000 2168212 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
156 | + W0621 22:19:18.788000 2168212 site-packages/torch/distributed/run.py:766] *****************************************
attnserver.run_attnserver.slurm.sh.343242.out.log
ADDED
@@ -0,0 +1,19 @@
1 | + Running ctx_length=1024, TP_SIZE=2, CP_SIZE=8, BATCH_SIZE=32
2 | + Cleaning up checkpoint directory: gpt-checkpoint
3 | + --------------------------------
4 | + CTX_LENGTH: 1024
5 | + TP_SIZE: 2
6 | + CP_SIZE: 8
7 | + CHECKPOINT_PATH: gpt-checkpoint
8 | + PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
9 | + --------------------------------
10 | + Cleaning up checkpoint directory: gpt-checkpoint
11 | + /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
12 | + --------------------------------
13 | + CTX_LENGTH: 1024
14 | + TP_SIZE: 2
15 | + CP_SIZE: 8
16 | + CHECKPOINT_PATH: gpt-checkpoint
17 | + PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
18 | + --------------------------------
19 | + /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
attnserver.run_attnserver.slurm.sh.343243.err.log
CHANGED
The diff for this file is too large to render.
See raw diff
|
|
attnserver.run_attnserver.slurm.sh.343243.out.log
CHANGED
The diff for this file is too large to render.
See raw diff
|
|
attnserver.run_attnserver.slurm.sh.343244.err.log
CHANGED
The diff for this file is too large to render.
See raw diff
|
|
attnserver.run_attnserver.slurm.sh.343244.out.log
CHANGED
The diff for this file is too large to render.
See raw diff
|
|
attnserver.run_attnserver.slurm.sh.343248.err.log
CHANGED
The diff for this file is too large to render.
See raw diff
|
|
attnserver.run_attnserver.slurm.sh.343248.out.log
CHANGED
The diff for this file is too large to render.
See raw diff
|
|
attnserver.run_attnserver.slurm.sh.343261.err.log
CHANGED
The diff for this file is too large to render.
See raw diff
|
|
attnserver.run_attnserver.slurm.sh.343261.out.log
CHANGED
The diff for this file is too large to render.
See raw diff
|
|
attnserver.run_attnserver.slurm.sh.343262.err.log
CHANGED
The diff for this file is too large to render.
See raw diff
|
|
attnserver.run_attnserver.slurm.sh.343262.out.log
CHANGED
The diff for this file is too large to render.
See raw diff
|
|
attnserver.run_attnserver.slurm.sh.343263.err.log
ADDED
The diff for this file is too large to render.
See raw diff
|
|
attnserver.run_attnserver.slurm.sh.343263.out.log
ADDED
The diff for this file is too large to render.
See raw diff
|
|
attnserver.run_attnserver.slurm.sh.343264.err.log
ADDED
@@ -0,0 +1,217 @@
1 | + + source /mnt/weka/home/hao.zhang/conda/miniconda/bin/activate
2 | + ++ _CONDA_ROOT=/mnt/weka/home/hao.zhang/conda/miniconda
3 | + ++ . /mnt/weka/home/hao.zhang/conda/miniconda/etc/profile.d/conda.sh
4 | + +++ export CONDA_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/conda
5 | + +++ CONDA_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/conda
6 | + +++ export _CE_M=
7 | + +++ _CE_M=
8 | + +++ export _CE_CONDA=
9 | + +++ _CE_CONDA=
10 | + +++ export CONDA_PYTHON_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/python
11 | + +++ CONDA_PYTHON_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/python
12 | + +++ '[' -z x ']'
13 | + ++ conda activate
14 | + ++ local cmd=activate
15 | + ++ case "$cmd" in
16 | + ++ __conda_activate activate
17 | + ++ '[' -n '' ']'
18 | + ++ local ask_conda
19 | + +++ PS1=
20 | + +++ __conda_exe shell.posix activate
21 | + +++ '[' -n '' ']'
22 | + +++ /mnt/weka/home/hao.zhang/conda/miniconda/bin/conda shell.posix activate
23 | + ++ ask_conda='unset _CE_M
24 | + unset _CE_CONDA
25 | + PS1='\''(base) '\''
26 | + export PATH='\''/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/conda/miniconda/bin:/mnt/weka/home/hao.zhang/conda/miniconda/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin'\''
27 | + export CONDA_SHLVL='\''1'\''
28 | + export CONDA_PROMPT_MODIFIER='\''(base) '\''
29 | + export CONDA_EXE='\''/mnt/weka/home/hao.zhang/conda/miniconda/bin/conda'\''
30 | + export CONDA_PYTHON_EXE='\''/mnt/weka/home/hao.zhang/conda/miniconda/bin/python'\'''
31 | + ++ eval 'unset _CE_M
32 | + unset _CE_CONDA
33 | + PS1='\''(base) '\''
34 | + export PATH='\''/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/conda/miniconda/bin:/mnt/weka/home/hao.zhang/conda/miniconda/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin'\''
35 | + export CONDA_SHLVL='\''1'\''
36 | + export CONDA_PROMPT_MODIFIER='\''(base) '\''
37 | + export CONDA_EXE='\''/mnt/weka/home/hao.zhang/conda/miniconda/bin/conda'\''
38 | + export CONDA_PYTHON_EXE='\''/mnt/weka/home/hao.zhang/conda/miniconda/bin/python'\'''
39 | + +++ unset _CE_M
40 | + +++ unset _CE_CONDA
41 | + +++ PS1='(base) '
42 | + +++ export PATH=/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/conda/miniconda/bin:/mnt/weka/home/hao.zhang/conda/miniconda/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin
43 | + +++ PATH=/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/conda/miniconda/bin:/mnt/weka/home/hao.zhang/conda/miniconda/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin
44 | + +++ export CONDA_SHLVL=1
45 | + +++ CONDA_SHLVL=1
46 | + +++ export 'CONDA_PROMPT_MODIFIER=(base) '
47 | + +++ CONDA_PROMPT_MODIFIER='(base) '
48 | + +++ export CONDA_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/conda
49 | + +++ CONDA_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/conda
50 | + +++ export CONDA_PYTHON_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/python
51 | + +++ CONDA_PYTHON_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/python
52 | + ++ __conda_hashr
53 | + ++ '[' -n '' ']'
54 | + ++ '[' -n '' ']'
55 | + ++ hash -r
56 | + + conda activate junda-attnserver
57 | + + local cmd=activate
58 | + + case "$cmd" in
59 | + + __conda_activate activate junda-attnserver
60 | + + '[' -n '' ']'
61 | + + local ask_conda
62 | + ++ PS1='(base) '
63 | + ++ __conda_exe shell.posix activate junda-attnserver
64 | + ++ '[' -n '' ']'
65 | + ++ /mnt/weka/home/hao.zhang/conda/miniconda/bin/conda shell.posix activate junda-attnserver
66 | + + ask_conda='unset _CE_M
67 | + unset _CE_CONDA
68 | + PS1='\''(junda-attnserver) '\''
69 | + export PATH='\''/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin:/mnt/weka/home/hao.zhang/conda/miniconda/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin'\''
70 | + export CONDA_PREFIX='\''/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver'\''
71 | + export CONDA_SHLVL='\''2'\''
72 | + export CONDA_DEFAULT_ENV='\''junda-attnserver'\''
73 | + export CONDA_PROMPT_MODIFIER='\''(junda-attnserver) '\''
74 | + export CONDA_PREFIX_1='\''/mnt/weka/home/hao.zhang/conda/miniconda'\''
75 | + export CONDA_EXE='\''/mnt/weka/home/hao.zhang/conda/miniconda/bin/conda'\''
76 | + export CONDA_PYTHON_EXE='\''/mnt/weka/home/hao.zhang/conda/miniconda/bin/python'\'''
77 | + + eval 'unset _CE_M
78 | + unset _CE_CONDA
79 | + PS1='\''(junda-attnserver) '\''
80 | + export PATH='\''/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin:/mnt/weka/home/hao.zhang/conda/miniconda/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin'\''
81 | + export CONDA_PREFIX='\''/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver'\''
82 | + export CONDA_SHLVL='\''2'\''
83 | + export CONDA_DEFAULT_ENV='\''junda-attnserver'\''
84 | + export CONDA_PROMPT_MODIFIER='\''(junda-attnserver) '\''
85 | + export CONDA_PREFIX_1='\''/mnt/weka/home/hao.zhang/conda/miniconda'\''
86 | + export CONDA_EXE='\''/mnt/weka/home/hao.zhang/conda/miniconda/bin/conda'\''
87 | + export CONDA_PYTHON_EXE='\''/mnt/weka/home/hao.zhang/conda/miniconda/bin/python'\'''
88 | + ++ unset _CE_M
89 | + ++ unset _CE_CONDA
90 | + ++ PS1='(junda-attnserver) '
91 | + ++ export PATH=/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin:/mnt/weka/home/hao.zhang/conda/miniconda/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin
92 | + ++ PATH=/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin:/mnt/weka/home/hao.zhang/conda/miniconda/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin
93 | + ++ export CONDA_PREFIX=/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver
94 | + ++ CONDA_PREFIX=/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver
95 | + ++ export CONDA_SHLVL=2
96 | + ++ CONDA_SHLVL=2
97 | + ++ export CONDA_DEFAULT_ENV=junda-attnserver
98 | + ++ CONDA_DEFAULT_ENV=junda-attnserver
99 | + ++ export 'CONDA_PROMPT_MODIFIER=(junda-attnserver) '
100 | + ++ CONDA_PROMPT_MODIFIER='(junda-attnserver) '
101 | + ++ export CONDA_PREFIX_1=/mnt/weka/home/hao.zhang/conda/miniconda
102 | + ++ CONDA_PREFIX_1=/mnt/weka/home/hao.zhang/conda/miniconda
103 | + ++ export CONDA_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/conda
104 | + ++ CONDA_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/conda
105 | + ++ export CONDA_PYTHON_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/python
106 | + ++ CONDA_PYTHON_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/python
107 | + + __conda_hashr
108 | + + '[' -n '' ']'
109 | + + '[' -n '' ']'
110 | + + hash -r
111 | + + export CHROME_TRACE_PREFIX=/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5
112 | + + CHROME_TRACE_PREFIX=/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5
113 | + + mkdir -p /mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5
114 | + + export PROF_TP_SIZE=1
115 | + + PROF_TP_SIZE=1
116 | + + export PROF_CP_SIZE=8
117 | + + PROF_CP_SIZE=8
118 | + + export PROF_BS=8
119 | + + PROF_BS=8
120 | + + for ctx_length in 1024 2048 4096 8192 12288 16384 24576 32768 40960 49152 65536 81920 98304 131072
121 | + + export PROF_CTX_LENGTH=1024
122 | + + PROF_CTX_LENGTH=1024
123 | + + name='/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L1024*tp1.cp8.bs8.json'
124 | + + '[' -f '/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L1024*tp1.cp8.bs8.json' ']'
125 | + + echo 'Running ctx_length=1024, TP_SIZE=1, CP_SIZE=8, BATCH_SIZE=8'
126 | + + srun bash ./attnserver.sh
127 | + + which python3
128 | + + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 1 --node_rank 0 --rdzv_id 343264 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-661:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 1 --context-parallel-size 8 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 1024 --max-position-embeddings 1024 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
129 | + /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated
130 | + and will be removed in future. Use torchrun.
131 | + Note that --use-env is set by default in torchrun.
132 | + If your script expects `--local-rank` argument to be set, please
133 | + change it to read from `os.environ['LOCAL_RANK']` instead. See
134 | + https://pytorch.org/docs/stable/distributed.html#launch-utility for
135 | + further instructions
136 | +
137 | + main()
138 | + W0621 22:18:09.812000 1550851 site-packages/torch/distributed/run.py:766]
139 | + W0621 22:18:09.812000 1550851 site-packages/torch/distributed/run.py:766] *****************************************
140 | + W0621 22:18:09.812000 1550851 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
141 | + W0621 22:18:09.812000 1550851 site-packages/torch/distributed/run.py:766] *****************************************
142 | + [rank4]:[W621 22:18:31.551180804 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 4] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
143 | + [rank3]:[W621 22:18:31.551180797 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 3] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
144 | + [rank2]:[W621 22:18:31.551202665 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 2] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
145 | + [rank7]:[W621 22:18:31.551235849 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 7] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
146 | + [rank6]:[W621 22:18:31.551267004 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 6] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
147 | + [rank5]:[W621 22:18:31.551293533 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 5] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
148 | + [rank1]:[W621 22:18:31.551372229 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 1] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
149 | + [rank0]:[W621 22:18:31.760470010 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 0] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
150 | + /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
151 | + warnings.warn(
152 | + /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
153 | + warnings.warn(
154 | + /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
155 | + warnings.warn(
156 | + /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
157 | + warnings.warn(
158 | + /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
159 | + warnings.warn(
160 | + /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
161 | + warnings.warn(
162 | + /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
163 | + warnings.warn(
164 | + /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
165 | + warnings.warn(
166 | + /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
167 | + warnings.warn(
168 | + /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
169 | + warnings.warn(
170 | + /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
171 | + warnings.warn(
172 | + /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
173 | + warnings.warn(
174 | + /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
175 | + warnings.warn(
176 | + /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
177 | + warnings.warn(
178 | + /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
179 | + warnings.warn(
180 | + /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
181 | + warnings.warn(
182 | + [rank0]: Traceback (most recent call last):
183 | + [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
184 | + [rank0]: pretrain(
185 | + [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 879, in pretrain
186 | + [rank0]: save_checkpoint(
187 | + [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 469, in save_checkpoint
188 | + [rank0]: async_save_request = dist_checkpointing.save(state_dict, checkpoint_name, save_strategy,
189 | + [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
190 | + [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 404, in save
191 | + [rank0]: sharded_strategy.save(sharded_state_dict, checkpoint_dir)
192 | + [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/fully_parallel.py", line 95, in save
193 | + [rank0]: return self.base_strategy.save(sharded_state_dict, checkpoint_dir)
194 | + [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
195 | + [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/base.py", line 228, in save
196 | + [rank0]: async_calls.maybe_finalize_async_calls(blocking=True)
197 | + [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/async_utils.py", line 545, in maybe_finalize_async_calls
198 | + [rank0]: finalize_fn()
199 | + [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 800, in finalize_fn
200 | + [rank0]: save_state_dict_async_finalize(*save_state_dict_ret)
201 | + [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/state_dict_saver.py", line 243, in save_state_dict_async_finalize
202 | + [rank0]: storage_writer.finish(global_metadata, all_results)
203 | + [rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/filesystem_async.py", line 483, in finish
204 | + [rank0]: super().finish(metadata, results)
205 | + [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/filesystem.py", line 697, in finish
206 | + [rank0]: with self.fs.create_stream(tmp_path, "wb") as metadata_file:
207 | + [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
208 | + [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/contextlib.py", line 137, in __enter__
209 | + [rank0]: return next(self.gen)
210 | + [rank0]: ^^^^^^^^^^^^^^
211 | + [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/filesystem.py", line 476, in create_stream
212 | + [rank0]: with path.open(mode) as stream:
213 | + [rank0]: ^^^^^^^^^^^^^^^
214 | + [rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/pathlib.py", line 1013, in open
215 | + [rank0]: return io.open(self, mode, buffering, encoding, errors, newline)
216 | + [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
217 | + [rank0]: FileNotFoundError: [Errno 2] No such file or directory: 'gpt-checkpoint/iter_0000010/.metadata.tmp'
attnserver.run_attnserver.slurm.sh.343264.out.log
ADDED
The diff for this file is too large to render.
See raw diff