Upload folder using huggingface_hub
- attnserver.run_attnserver.slurm.sh.343207.out.log +70 -0
- attnserver.run_attnserver.slurm.sh.343213.out.log +2 -0
- attnserver.run_attnserver.slurm.sh.343214.err.log +430 -0
- attnserver.run_attnserver.slurm.sh.343214.out.log +592 -0
- attnserver.run_attnserver.slurm.sh.343215.err.log +2 -2
- attnserver.run_attnserver.slurm.sh.343215.out.log +0 -0
- attnserver.run_attnserver.slurm.sh.343225.out.log +16 -0
- attnserver.run_attnserver.slurm.sh.343226.out.log +83 -0
- attnserver.run_attnserver.slurm.sh.343237.out.log +314 -0
- attnserver.run_attnserver.slurm.sh.343238.out.log +476 -0
- attnserver.run_attnserver.slurm.sh.343239.err.log +486 -0
- attnserver.run_attnserver.slurm.sh.343239.out.log +0 -0
- attnserver.run_attnserver.slurm.sh.343240.err.log +0 -0
- attnserver.run_attnserver.slurm.sh.343240.out.log +0 -0
- attnserver.run_attnserver.slurm.sh.343243.out.log +85 -0
- attnserver.run_attnserver.slurm.sh.343244.err.log +40 -0
- attnserver.run_attnserver.slurm.sh.343244.out.log +720 -0
- attnserver.run_attnserver.slurm.sh.343248.err.log +668 -0
- attnserver.run_attnserver.slurm.sh.343248.out.log +0 -0
- attnserver.run_attnserver.slurm.sh.343261.err.log +143 -0
- attnserver.run_attnserver.slurm.sh.343261.out.log +0 -0
- attnserver.run_attnserver.slurm.sh.343262.err.log +217 -0
- attnserver.run_attnserver.slurm.sh.343262.out.log +0 -0
attnserver.run_attnserver.slurm.sh.343207.out.log
CHANGED
@@ -19621,3 +19621,73 @@ batch tensor after cp: position_ids torch.Size([1, 131072])
 Start exporting trace 4
 Done exporting trace 4
 [2025-06-21 22:06:58] iteration 5/ 10 | consumed samples: 5 | elapsed time per iteration (ms): 124682.2 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 268435456.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([1, 131072])
+batch tensor: labels torch.Size([1, 131072])
+batch tensor: loss_mask torch.Size([1, 131072])
+batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor: position_ids torch.Size([1, 131072])
+batch tensor after cp: tokens torch.Size([1, 131072])
+batch tensor after cp: labels torch.Size([1, 131072])
+batch tensor after cp: loss_mask torch.Size([1, 131072])
+batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor after cp: position_ids torch.Size([1, 131072])
+batch tensor: tokens torch.Size([1, 131072])
+batch tensor: labels torch.Size([1, 131072])
+batch tensor: loss_mask torch.Size([1, 131072])
+batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor: position_ids torch.Size([1, 131072])
+batch tensor after cp: tokens torch.Size([1, 131072])
+batch tensor after cp: labels torch.Size([1, 131072])
+batch tensor after cp: loss_mask torch.Size([1, 131072])
+batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor after cp: position_ids torch.Size([1, 131072])
+batch tensor: tokens torch.Size([1, 131072])
+batch tensor: labels torch.Size([1, 131072])
+batch tensor: loss_mask torch.Size([1, 131072])
+batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor: position_ids torch.Size([1, 131072])
+batch tensor after cp: tokens torch.Size([1, 131072])
+batch tensor after cp: labels torch.Size([1, 131072])
+batch tensor after cp: loss_mask torch.Size([1, 131072])
+batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor after cp: position_ids torch.Size([1, 131072])
+batch tensor: tokens torch.Size([1, 131072])
+batch tensor: labels torch.Size([1, 131072])
+batch tensor: loss_mask torch.Size([1, 131072])
+batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor: position_ids torch.Size([1, 131072])
+batch tensor after cp: tokens torch.Size([1, 131072])
+batch tensor after cp: labels torch.Size([1, 131072])
+batch tensor after cp: loss_mask torch.Size([1, 131072])
+batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor after cp: position_ids torch.Size([1, 131072])
+batch tensor: tokens torch.Size([1, 131072])
+batch tensor: labels torch.Size([1, 131072])
+batch tensor: loss_mask torch.Size([1, 131072])
+batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor: position_ids torch.Size([1, 131072])
+batch tensor after cp: tokens torch.Size([1, 131072])
+batch tensor after cp: labels torch.Size([1, 131072])
+batch tensor after cp: loss_mask torch.Size([1, 131072])
+batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor after cp: position_ids torch.Size([1, 131072])
+batch tensor: tokens torch.Size([1, 131072])
+batch tensor: labels torch.Size([1, 131072])
+batch tensor: loss_mask torch.Size([1, 131072])
+batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor: position_ids torch.Size([1, 131072])
+batch tensor after cp: tokens torch.Size([1, 131072])
+batch tensor after cp: labels torch.Size([1, 131072])
+batch tensor after cp: loss_mask torch.Size([1, 131072])
+batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor after cp: position_ids torch.Size([1, 131072])
+batch tensor: tokens torch.Size([1, 131072])
+batch tensor: labels torch.Size([1, 131072])
+batch tensor: loss_mask torch.Size([1, 131072])
+batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor: position_ids torch.Size([1, 131072])
+batch tensor after cp: tokens torch.Size([1, 131072])
+batch tensor after cp: labels torch.Size([1, 131072])
+batch tensor after cp: loss_mask torch.Size([1, 131072])
+batch tensor after cp: attention_mask torch.Size([1, 1, 131072, 131072])
+batch tensor after cp: position_ids torch.Size([1, 131072])
attnserver.run_attnserver.slurm.sh.343213.out.log
CHANGED
@@ -55966,3 +55966,5 @@ batch tensor after cp: labels torch.Size([1, 16384])
 batch tensor after cp: loss_mask torch.Size([1, 16384])
 batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 131072])
 batch tensor after cp: position_ids torch.Size([1, 16384])
+Start exporting trace 0
+Done exporting trace 0
attnserver.run_attnserver.slurm.sh.343214.err.log
CHANGED
@@ -74500,3 +74500,433 @@ W0621 22:07:22.256000 4123000 site-packages/torch/distributed/elastic/multiproce
 W0621 22:07:22.257000 4123000 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 4123074 closing signal SIGTERM
 W0621 22:07:22.261000 4123000 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 4123075 closing signal SIGTERM
 W0621 22:07:22.261000 4123000 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 4123076 closing signal SIGTERM
+W0621 22:07:51.740000 1726601 site-packages/torch/distributed/elastic/multiprocessing/api.py:919] Unable to shutdown process 1726674 via 15, forcefully exiting via 9
+W0621 22:07:51.959000 550674 site-packages/torch/distributed/elastic/multiprocessing/api.py:919] Unable to shutdown process 550744 via 15, forcefully exiting via 9
+W0621 22:07:52.254000 2005013 site-packages/torch/distributed/elastic/multiprocessing/api.py:919] Unable to shutdown process 2005094 via 15, forcefully exiting via 9
+W0621 22:07:52.262000 4123000 site-packages/torch/distributed/elastic/multiprocessing/api.py:919] Unable to shutdown process 4123070 via 15, forcefully exiting via 9
+E0621 22:07:54.732000 1726601 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 6 (pid: 1726676) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
+Traceback (most recent call last):
+File "<frozen runpy>", line 198, in _run_module_as_main
+File "<frozen runpy>", line 88, in _run_code
+File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in <module>
+main()
+File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
+return arg(*args, **kwargs)
+^^^^^^^^^^^^^^^^^^^^
+File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main
+launch(args)
+File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch
+run(args)
+File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run
+elastic_launch(
+File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__
+return launch_agent(self._config, self._entrypoint, list(args))
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent
+raise ChildFailedError(
+torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
+============================================================
+./pretrain_gpt_profile.py FAILED
+------------------------------------------------------------
+Failures:
+<NO_OTHER_FAILURES>
+------------------------------------------------------------
+Root Cause (first observed failure):
+[0]:
+time : 2025-06-21_22:07:21
+host : fs-mbz-gpu-455
+rank : 14 (local_rank: 6)
+exitcode : 1 (pid: 1726676)
+error_file: <N/A>
+traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
+============================================================
+E0621 22:07:55.034000 2005013 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 7 (pid: 2005097) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
+Traceback (most recent call last):
+File "<frozen runpy>", line 198, in _run_module_as_main
+File "<frozen runpy>", line 88, in _run_code
+File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in <module>
+main()
+File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
+return arg(*args, **kwargs)
+^^^^^^^^^^^^^^^^^^^^
+File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main
+launch(args)
+File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch
+run(args)
+File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run
+elastic_launch(
+File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__
+return launch_agent(self._config, self._entrypoint, list(args))
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent
+raise ChildFailedError(
+torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
+============================================================
+./pretrain_gpt_profile.py FAILED
+------------------------------------------------------------
+Failures:
+<NO_OTHER_FAILURES>
+------------------------------------------------------------
+Root Cause (first observed failure):
+[0]:
+time : 2025-06-21_22:07:22
+host : fs-mbz-gpu-404
+rank : 7 (local_rank: 7)
+exitcode : 1 (pid: 2005097)
+error_file: <N/A>
+traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
+============================================================
++ set +x
++ set +x
| 74581 |
+
W0621 22:07:59.107000 550674 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1341] The node 'fs-mbz-gpu-854_550674_0' has failed to send a keep-alive heartbeat to the rendezvous '343214' due to an error of type RendezvousConnectionError.
|
| 74582 |
+
W0621 22:07:59.167000 4123000 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1341] The node 'fs-mbz-gpu-885_4123000_0' has failed to send a keep-alive heartbeat to the rendezvous '343214' due to an error of type RendezvousConnectionError.
|
| 74583 |
+
W0621 22:08:00.255000 4123000 site-packages/torch/distributed/elastic/multiprocessing/api.py:919] Unable to shutdown process 4123074 via 15, forcefully exiting via 9
|
| 74584 |
+
E0621 22:08:00.264000 4123000 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 7 (pid: 4123077) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
|
| 74585 |
+
[W621 22:08:00.632629656 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-885]:47432, remote=[fs-mbz-gpu-404]:29500): Broken pipe
|
| 74586 |
+
Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first):
|
| 74587 |
+
frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >) + 0x98 (0x14c62df785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
|
| 74588 |
+
frame #1: <unknown function> + 0x5ba8afe (0x14c61725aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 74589 |
+
frame #2: <unknown function> + 0x5baa358 (0x14c61725c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 74590 |
+
frame #3: <unknown function> + 0x5babb3e (0x14c61725db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 74591 |
+
frame #4: c10d::TCPStore::doWait(c10::ArrayRef<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > >, std::chrono::duration<long, std::ratio<1l, 1000l> >) + 0x1a6 (0x14c617257ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 74592 |
+
frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) + 0x33 (0x14c617257ea3 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 74593 |
+
frame #6: c10d::TCPStore::get(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) + 0xab (0x14c617258f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 74594 |
+
frame #7: <unknown function> + 0xc0f526 (0x14c62658b526 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
|
| 74595 |
+
frame #8: <unknown function> + 0x37f17d (0x14c625cfb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
|
| 74596 |
+
<omitting python frames>
|
| 74597 |
+
frame #26: <unknown function> + 0x29d90 (0x14c62f28fd90 in /lib/x86_64-linux-gnu/libc.so.6)
|
| 74598 |
+
frame #27: __libc_start_main + 0x80 (0x14c62f28fe40 in /lib/x86_64-linux-gnu/libc.so.6)
|
| 74599 |
+
|
| 74600 |
+
W0621 22:08:00.289000 4123000 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-885_4123000_0' has failed to shutdown the rendezvous '343214' due to an error of type RendezvousConnectionError.
|
| 74601 |
+
[W621 22:08:00.658907883 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-885]:47432, remote=[fs-mbz-gpu-404]:29500): Broken pipe
|
| 74602 |
+
Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first):
|
| 74603 |
+
frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >) + 0x98 (0x14c62df785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
|
| 74604 |
+
frame #1: <unknown function> + 0x5ba8afe (0x14c61725aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 74605 |
+
frame #2: <unknown function> + 0x5baa358 (0x14c61725c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 74606 |
+
frame #3: <unknown function> + 0x5babb3e (0x14c61725db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 74607 |
+
frame #4: c10d::TCPStore::doWait(c10::ArrayRef<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > >, std::chrono::duration<long, std::ratio<1l, 1000l> >) + 0x1a6 (0x14c617257ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 74608 |
+
frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) + 0x33 (0x14c617257ea3 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 74609 |
+
frame #6: c10d::TCPStore::get(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) + 0xab (0x14c617258f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 74610 |
+
frame #7: <unknown function> + 0xc0f526 (0x14c62658b526 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
|
| 74611 |
+
frame #8: <unknown function> + 0x37f17d (0x14c625cfb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
|
| 74612 |
+
<omitting python frames>
|
| 74613 |
+
frame #26: <unknown function> + 0x29d90 (0x14c62f28fd90 in /lib/x86_64-linux-gnu/libc.so.6)
|
| 74614 |
+
frame #27: __libc_start_main + 0x80 (0x14c62f28fe40 in /lib/x86_64-linux-gnu/libc.so.6)
|
| 74615 |
+
|
| 74616 |
+
W0621 22:08:00.302000 4123000 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-885_4123000_0' has failed to shutdown the rendezvous '343214' due to an error of type RendezvousConnectionError.
|
| 74617 |
+
[W621 22:08:00.670678056 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-885]:47432, remote=[fs-mbz-gpu-404]:29500): Broken pipe
|
| 74618 |
+
Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first):
|
| 74619 |
+
frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >) + 0x98 (0x14c62df785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
|
| 74620 |
+
frame #1: <unknown function> + 0x5ba8afe (0x14c61725aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 74621 |
+
frame #2: <unknown function> + 0x5baa358 (0x14c61725c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 74622 |
+
frame #3: <unknown function> + 0x5babb3e (0x14c61725db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 74623 |
+
frame #4: c10d::TCPStore::doWait(c10::ArrayRef<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > >, std::chrono::duration<long, std::ratio<1l, 1000l> >) + 0x1a6 (0x14c617257ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 74624 |
+
frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) + 0x33 (0x14c617257ea3 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 74625 |
+
frame #6: c10d::TCPStore::get(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) + 0xab (0x14c617258f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 74626 |
+
frame #7: <unknown function> + 0xc0f526 (0x14c62658b526 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
|
| 74627 |
+
frame #8: <unknown function> + 0x37f17d (0x14c625cfb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
|
| 74628 |
+
<omitting python frames>
|
| 74629 |
+
frame #26: <unknown function> + 0x29d90 (0x14c62f28fd90 in /lib/x86_64-linux-gnu/libc.so.6)
|
| 74630 |
+
frame #27: __libc_start_main + 0x80 (0x14c62f28fe40 in /lib/x86_64-linux-gnu/libc.so.6)
|
| 74631 |
+
|
| 74632 |
+
W0621 22:08:00.313000 4123000 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-885_4123000_0' has failed to shutdown the rendezvous '343214' due to an error of type RendezvousConnectionError.
|
| 74633 |
+
Traceback (most recent call last):
|
| 74634 |
+
File "<frozen runpy>", line 198, in _run_module_as_main
|
| 74635 |
+
File "<frozen runpy>", line 88, in _run_code
|
| 74636 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in <module>
|
| 74637 |
+
main()
|
| 74638 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
|
| 74639 |
+
return arg(*args, **kwargs)
|
| 74640 |
+
^^^^^^^^^^^^^^^^^^^^
|
| 74641 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main
|
| 74642 |
+
launch(args)
|
| 74643 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch
|
| 74644 |
+
run(args)
|
| 74645 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run
|
| 74646 |
+
elastic_launch(
|
| 74647 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__
|
| 74648 |
+
return launch_agent(self._config, self._entrypoint, list(args))
|
| 74649 |
+
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 74650 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent
|
| 74651 |
+
raise ChildFailedError(
|
| 74652 |
+
torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
|
| 74653 |
+
============================================================
|
| 74654 |
+
./pretrain_gpt_profile.py FAILED
|
| 74655 |
+
------------------------------------------------------------
|
| 74656 |
+
Failures:
|
| 74657 |
+
<NO_OTHER_FAILURES>
|
| 74658 |
+
------------------------------------------------------------
|
| 74659 |
+
Root Cause (first observed failure):
|
| 74660 |
+
[0]:
|
| 74661 |
+
time : 2025-06-21_22:07:22
|
| 74662 |
+
host : fs-mbz-gpu-885
|
| 74663 |
+
rank : 31 (local_rank: 7)
|
| 74664 |
+
exitcode : 1 (pid: 4123077)
|
| 74665 |
+
error_file: <N/A>
|
| 74666 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
| 74667 |
+
============================================================
|
| 74668 |
+
+ set +x
|
| 74669 |
+
[W621 22:08:04.480621105 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-854]:52120, remote=[fs-mbz-gpu-404]:29500): Broken pipe
|
| 74670 |
+
Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first):
|
| 74671 |
+
frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >) + 0x98 (0x1481bcb785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
|
| 74672 |
+
frame #1: <unknown function> + 0x5ba8afe (0x1481a5e5aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 74673 |
+
frame #2: <unknown function> + 0x5baa358 (0x1481a5e5c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 74674 |
+
frame #3: <unknown function> + 0x5babb3e (0x1481a5e5db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 74675 |
+
frame #4: c10d::TCPStore::doWait(c10::ArrayRef<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > >, std::chrono::duration<long, std::ratio<1l, 1000l> >) + 0x1a6 (0x1481a5e57ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 74676 |
+
frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) + 0x33 (0x1481a5e57ea3 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 74677 |
+
frame #6: c10d::TCPStore::get(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) + 0xab (0x1481a5e58f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 74678 |
+
frame #7: <unknown function> + 0xc0f526 (0x1481b518b526 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
|
| 74679 |
+
frame #8: <unknown function> + 0x37f17d (0x1481b48fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
|
| 74680 |
+
<omitting python frames>
|
| 74681 |
+
frame #17: <unknown function> + 0x94ac3 (0x1481bdef1ac3 in /lib/x86_64-linux-gnu/libc.so.6)
|
| 74682 |
+
frame #18: <unknown function> + 0x126850 (0x1481bdf83850 in /lib/x86_64-linux-gnu/libc.so.6)
|
| 74683 |
+
|
| 74684 |
+
W0621 22:08:04.118000 550674 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1341] The node 'fs-mbz-gpu-854_550674_0' has failed to send a keep-alive heartbeat to the rendezvous '343214' due to an error of type RendezvousConnectionError.
|
| 74685 |
+
E0621 22:08:04.815000 550674 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 6 (pid: 550750) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
|
| 74686 |
+
[W621 22:08:04.188180780 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-854]:52120, remote=[fs-mbz-gpu-404]:29500): Broken pipe
|
| 74687 |
+
Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first):
|
| 74688 |
+
frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >) + 0x98 (0x1481bcb785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
|
| 74689 |
+
frame #1: <unknown function> + 0x5ba8afe (0x1481a5e5aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 74690 |
+
frame #2: <unknown function> + 0x5baa358 (0x1481a5e5c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 74691 |
+
frame #3: <unknown function> + 0x5babb3e (0x1481a5e5db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 74692 |
+
frame #4: c10d::TCPStore::doWait(c10::ArrayRef<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > >, std::chrono::duration<long, std::ratio<1l, 1000l> >) + 0x1a6 (0x1481a5e57ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 74693 |
+
frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) + 0x33 (0x1481a5e57ea3 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 74694 |
+
frame #6: c10d::TCPStore::get(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) + 0xab (0x1481a5e58f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 74695 |
+
frame #7: <unknown function> + 0xc0f526 (0x1481b518b526 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
|
| 74696 |
+
frame #8: <unknown function> + 0x37f17d (0x1481b48fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
|
| 74697 |
+
<omitting python frames>
|
| 74698 |
+
frame #26: <unknown function> + 0x29d90 (0x1481bde86d90 in /lib/x86_64-linux-gnu/libc.so.6)
|
| 74699 |
+
frame #27: __libc_start_main + 0x80 (0x1481bde86e40 in /lib/x86_64-linux-gnu/libc.so.6)
|
| 74700 |
+
|
| 74701 |
+
W0621 22:08:04.831000 550674 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-854_550674_0' has failed to shutdown the rendezvous '343214' due to an error of type RendezvousConnectionError.
|
| 74702 |
+
[W621 22:08:04.205215967 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-854]:52120, remote=[fs-mbz-gpu-404]:29500): Broken pipe
|
| 74703 |
+
Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first):
|
| 74704 |
+
frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >) + 0x98 (0x1481bcb785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
|
| 74705 |
+
frame #1: <unknown function> + 0x5ba8afe (0x1481a5e5aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 74706 |
+
frame #2: <unknown function> + 0x5baa358 (0x1481a5e5c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 74707 |
+
frame #3: <unknown function> + 0x5babb3e (0x1481a5e5db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 74708 |
+
frame #4: c10d::TCPStore::doWait(c10::ArrayRef<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > >, std::chrono::duration<long, std::ratio<1l, 1000l> >) + 0x1a6 (0x1481a5e57ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 74709 |
+
frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) + 0x33 (0x1481a5e57ea3 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 74710 |
+
frame #6: c10d::TCPStore::get(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) + 0xab (0x1481a5e58f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 74711 |
+
frame #7: <unknown function> + 0xc0f526 (0x1481b518b526 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
|
| 74712 |
+
frame #8: <unknown function> + 0x37f17d (0x1481b48fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
|
| 74713 |
+
<omitting python frames>
|
| 74714 |
+
frame #26: <unknown function> + 0x29d90 (0x1481bde86d90 in /lib/x86_64-linux-gnu/libc.so.6)
|
| 74715 |
+
frame #27: __libc_start_main + 0x80 (0x1481bde86e40 in /lib/x86_64-linux-gnu/libc.so.6)
|
| 74716 |
+
|
| 74717 |
+
W0621 22:08:04.850000 550674 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-854_550674_0' has failed to shutdown the rendezvous '343214' due to an error of type RendezvousConnectionError.
|
| 74718 |
+
[W621 22:08:04.223731210 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-854]:52120, remote=[fs-mbz-gpu-404]:29500): Broken pipe
|
| 74719 |
+
Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first):
|
| 74720 |
+
frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >) + 0x98 (0x1481bcb785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
|
| 74721 |
+
frame #1: <unknown function> + 0x5ba8afe (0x1481a5e5aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 74722 |
+
frame #2: <unknown function> + 0x5baa358 (0x1481a5e5c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 74723 |
+
frame #3: <unknown function> + 0x5babb3e (0x1481a5e5db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 74724 |
+
frame #4: c10d::TCPStore::doWait(c10::ArrayRef<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > >, std::chrono::duration<long, std::ratio<1l, 1000l> >) + 0x1a6 (0x1481a5e57ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 74725 |
+
frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) + 0x33 (0x1481a5e57ea3 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 74726 |
+
frame #6: c10d::TCPStore::get(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) + 0xab (0x1481a5e58f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 74727 |
+
frame #7: <unknown function> + 0xc0f526 (0x1481b518b526 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
|
| 74728 |
+
frame #8: <unknown function> + 0x37f17d (0x1481b48fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
|
| 74729 |
+
<omitting python frames>
|
| 74730 |
+
frame #26: <unknown function> + 0x29d90 (0x1481bde86d90 in /lib/x86_64-linux-gnu/libc.so.6)
|
| 74731 |
+
frame #27: __libc_start_main + 0x80 (0x1481bde86e40 in /lib/x86_64-linux-gnu/libc.so.6)
|
| 74732 |
+
|
| 74733 |
+
W0621 22:08:04.861000 550674 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-854_550674_0' has failed to shutdown the rendezvous '343214' due to an error of type RendezvousConnectionError.
|
| 74734 |
+
Traceback (most recent call last):
|
| 74735 |
+
File "<frozen runpy>", line 198, in _run_module_as_main
|
| 74736 |
+
File "<frozen runpy>", line 88, in _run_code
|
| 74737 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in <module>
|
| 74738 |
+
main()
|
| 74739 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
|
| 74740 |
+
return arg(*args, **kwargs)
|
| 74741 |
+
^^^^^^^^^^^^^^^^^^^^
|
| 74742 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main
|
| 74743 |
+
launch(args)
|
| 74744 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch
|
| 74745 |
+
run(args)
|
| 74746 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run
|
| 74747 |
+
elastic_launch(
|
| 74748 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__
|
| 74749 |
+
return launch_agent(self._config, self._entrypoint, list(args))
|
| 74750 |
+
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 74751 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent
|
| 74752 |
+
raise ChildFailedError(
|
| 74753 |
+
torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
|
| 74754 |
+
============================================================
|
| 74755 |
+
./pretrain_gpt_profile.py FAILED
|
| 74756 |
+
------------------------------------------------------------
|
| 74757 |
+
Failures:
|
| 74758 |
+
<NO_OTHER_FAILURES>
|
| 74759 |
+
------------------------------------------------------------
|
| 74760 |
+
Root Cause (first observed failure):
|
| 74761 |
+
[0]:
|
| 74762 |
+
time : 2025-06-21_22:07:21
|
| 74763 |
+
host : fs-mbz-gpu-854
|
| 74764 |
+
rank : 22 (local_rank: 6)
|
| 74765 |
+
exitcode : 1 (pid: 550750)
|
| 74766 |
+
error_file: <N/A>
|
| 74767 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
| 74768 |
+
============================================================
|
| 74769 |
+
+ set +x
|
| 74770 |
+
+ for ctx_length in 1024 2048 4096 8192 12288 16384 24576 32768 40960 49152 65536 81920 98304 131072
|
| 74771 |
+
+ export PROF_CTX_LENGTH=131072
|
| 74772 |
+
+ PROF_CTX_LENGTH=131072
|
| 74773 |
+
+ name='/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L131072*tp4.cp8.bs2.json'
|
| 74774 |
+
+ '[' -f '/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L131072*tp4.cp8.bs2.json' ']'
|
| 74775 |
+
+ echo 'Running ctx_length=131072, TP_SIZE=4, CP_SIZE=8, BATCH_SIZE=2'
|
| 74776 |
+
+ srun bash ./attnserver.sh
|
| 74777 |
+
+ which python3
|
| 74778 |
+
+ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 0 --rdzv_id 343214 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-404:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 4 --context-parallel-size 8 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 131072 --max-position-embeddings 131072 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
|
| 74779 |
+
+ which python3
|
| 74780 |
+
+ which python3
|
| 74781 |
+
+ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 1 --rdzv_id 343214 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-404:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 4 --context-parallel-size 8 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 131072 --max-position-embeddings 131072 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
|
| 74782 |
+
+ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 2 --rdzv_id 343214 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-404:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 4 --context-parallel-size 8 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 131072 --max-position-embeddings 131072 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
|
| 74783 |
+
+ which python3
|
| 74784 |
+
+ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 3 --rdzv_id 343214 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-404:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 4 --context-parallel-size 8 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 131072 --max-position-embeddings 131072 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
|
| 74785 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated
|
| 74786 |
+
and will be removed in future. Use torchrun.
|
| 74787 |
+
Note that --use-env is set by default in torchrun.
|
| 74788 |
+
If your script expects `--local-rank` argument to be set, please
|
| 74789 |
+
change it to read from `os.environ['LOCAL_RANK']` instead. See
|
| 74790 |
+
https://pytorch.org/docs/stable/distributed.html#launch-utility for
|
| 74791 |
+
further instructions
|
| 74792 |
+
|
| 74793 |
+
main()
|
| 74794 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated
|
| 74795 |
+
and will be removed in future. Use torchrun.
|
| 74796 |
+
Note that --use-env is set by default in torchrun.
|
| 74797 |
+
If your script expects `--local-rank` argument to be set, please
|
| 74798 |
+
change it to read from `os.environ['LOCAL_RANK']` instead. See
|
| 74799 |
+
https://pytorch.org/docs/stable/distributed.html#launch-utility for
|
| 74800 |
+
further instructions
|
| 74801 |
+
|
| 74802 |
+
main()
|
| 74803 |
+
W0621 22:08:07.950000 2008360 site-packages/torch/distributed/run.py:766]
|
| 74804 |
+
W0621 22:08:07.950000 2008360 site-packages/torch/distributed/run.py:766] *****************************************
|
| 74805 |
+
W0621 22:08:07.950000 2008360 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
|
| 74806 |
+
W0621 22:08:07.950000 2008360 site-packages/torch/distributed/run.py:766] *****************************************
|
| 74807 |
+
W0621 22:08:07.954000 1730015 site-packages/torch/distributed/run.py:766]
|
| 74808 |
+
W0621 22:08:07.954000 1730015 site-packages/torch/distributed/run.py:766] *****************************************
|
| 74809 |
+
W0621 22:08:07.954000 1730015 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
|
| 74810 |
+
W0621 22:08:07.954000 1730015 site-packages/torch/distributed/run.py:766] *****************************************
|
| 74811 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated
|
| 74812 |
+
and will be removed in future. Use torchrun.
|
| 74813 |
+
Note that --use-env is set by default in torchrun.
|
| 74814 |
+
If your script expects `--local-rank` argument to be set, please
|
| 74815 |
+
change it to read from `os.environ['LOCAL_RANK']` instead. See
|
| 74816 |
+
https://pytorch.org/docs/stable/distributed.html#launch-utility for
|
| 74817 |
+
further instructions
|
| 74818 |
+
|
| 74819 |
+
main()
|
| 74820 |
+
W0621 22:08:08.022000 4126284 site-packages/torch/distributed/run.py:766]
|
| 74821 |
+
W0621 22:08:08.022000 4126284 site-packages/torch/distributed/run.py:766] *****************************************
|
| 74822 |
+
W0621 22:08:08.022000 4126284 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
|
| 74823 |
+
W0621 22:08:08.022000 4126284 site-packages/torch/distributed/run.py:766] *****************************************
|
| 74824 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated
|
| 74825 |
+
and will be removed in future. Use torchrun.
|
| 74826 |
+
Note that --use-env is set by default in torchrun.
|
| 74827 |
+
If your script expects `--local-rank` argument to be set, please
|
| 74828 |
+
change it to read from `os.environ['LOCAL_RANK']` instead. See
|
| 74829 |
+
https://pytorch.org/docs/stable/distributed.html#launch-utility for
|
| 74830 |
+
further instructions
|
| 74831 |
+
|
| 74832 |
+
main()
|
| 74833 |
+
W0621 22:08:08.299000 554106 site-packages/torch/distributed/run.py:766]
|
| 74834 |
+
W0621 22:08:08.299000 554106 site-packages/torch/distributed/run.py:766] *****************************************
|
| 74835 |
+
W0621 22:08:08.299000 554106 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
|
| 74836 |
+
W0621 22:08:08.299000 554106 site-packages/torch/distributed/run.py:766] *****************************************
|
| 74837 |
+
[rank7]:[W621 22:08:32.186204054 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 7] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 74838 |
+
[rank2]:[W621 22:08:32.186244524 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 2] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 74839 |
+
[rank11]:[W621 22:08:32.894358656 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 11] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 74840 |
+
[rank6]:[W621 22:08:32.187193432 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 6] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 74841 |
+
[rank12]:[W621 22:08:32.894950794 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 12] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 74842 |
+
[rank4]:[W621 22:08:32.187222723 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 4] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 74843 |
+
[rank1]:[W621 22:08:32.187413964 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 1] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 74844 |
+
[rank3]:[W621 22:08:32.187448488 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 3] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 74845 |
+
[rank20]:[W621 22:08:32.714632159 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 20] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 74846 |
+
[rank5]:[W621 22:08:32.196212744 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 5] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 74847 |
+
[rank23]:[W621 22:08:32.720534902 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 23] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 74848 |
+
[rank18]:[W621 22:08:32.720562754 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 18] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 74849 |
+
[rank15]:[W621 22:08:32.904971151 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 15] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 74850 |
+
[rank28]:[W621 22:08:32.717096970 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 28] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 74851 |
+
[rank30]:[W621 22:08:32.717098646 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 30] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 74852 |
+
[rank19]:[W621 22:08:32.720590798 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 19] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 74853 |
+
[rank14]:[W621 22:08:32.905056735 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 14] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 74854 |
+
[rank25]:[W621 22:08:32.717205166 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 25] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 74855 |
+
[rank31]:[W621 22:08:32.717224633 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 31] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 74856 |
+
[rank27]:[W621 22:08:32.717255676 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 27] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 74857 |
+
[rank17]:[W621 22:08:32.720619891 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 17] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 74858 |
+
[rank13]:[W621 22:08:32.905529657 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 13] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 74859 |
+
[rank26]:[W621 22:08:32.717257193 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 26] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 74860 |
+
[rank29]:[W621 22:08:32.717270982 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 29] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 74861 |
+
[rank22]:[W621 22:08:32.721046214 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 22] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 74862 |
+
[rank9]:[W621 22:08:32.905614860 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 9] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 74863 |
+
[rank21]:[W621 22:08:32.723772811 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 21] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 74864 |
+
[rank10]:[W621 22:08:32.905739596 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 10] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 74865 |
+
[rank16]:[W621 22:08:32.815873645 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 16] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 74866 |
+
[rank8]:[W621 22:08:32.004968041 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 8] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 74867 |
+
[rank24]:[W621 22:08:32.819901279 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 24] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 74868 |
+
[rank0]:[W621 22:08:32.366872094 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 0] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 74869 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
warnings.warn(
attnserver.run_attnserver.slurm.sh.343214.out.log
CHANGED
@@ -42316,3 +42316,595 @@ WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 72.00 GiB. GPU
['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 72.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 49.17 GiB is free. Including non-PyTorch memory, this process has 90.26 GiB memory in use. Of the allocated memory 85.20 GiB is allocated by PyTorch, and 1.31 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 72.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 49.51 GiB is free. Including non-PyTorch memory, this process has 89.92 GiB memory in use. Of the allocated memory 85.20 GiB is allocated by PyTorch, and 1007.16 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 72.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 49.51 GiB is free. Including non-PyTorch memory, this process has 89.92 GiB memory in use. Of the allocated memory 85.20 GiB is allocated by PyTorch, and 1007.16 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
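The out-of-memory tracebacks above already name the suggested mitigation; as an illustrative sketch (not part of the logs), the allocator hint could be set from the launching script before the first CUDA allocation:

import os

# Allocator setting suggested by the PyTorch OOM message above;
# it must be set before CUDA memory is first allocated.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"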
Running ctx_length=131072, TP_SIZE=4, CP_SIZE=8, BATCH_SIZE=2
Cleaning up checkpoint directory: gpt-checkpoint
--------------------------------
CTX_LENGTH: 131072
TP_SIZE: 4
CP_SIZE: 8
CHECKPOINT_PATH: gpt-checkpoint
PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
--------------------------------
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
INFO:megatron.training.initialize:Setting logging level to 0
WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written.
WARNING: one_logger package is required to enable e2e metrics tracking. please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it
using world size: 32, data-parallel size: 1, context-parallel size: 8, hierarchical context-parallel sizes: None, tensor-model-parallel size: 4, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0
Number of virtual stages per pipeline stage: None
WARNING: Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used
using torch.float16 for parameters ...
------------------------ arguments ------------------------
|
| 42384 |
+
account_for_embedding_in_pipeline_split ......... False
|
| 42385 |
+
account_for_loss_in_pipeline_split .............. False
|
| 42386 |
+
accumulate_allreduce_grads_in_fp32 .............. False
|
| 42387 |
+
adam_beta1 ...................................... 0.9
|
| 42388 |
+
adam_beta2 ...................................... 0.999
|
| 42389 |
+
adam_eps ........................................ 1e-08
|
| 42390 |
+
add_bias_linear ................................. True
|
| 42391 |
+
add_position_embedding .......................... True
|
| 42392 |
+
add_qkv_bias .................................... True
|
| 42393 |
+
adlr_autoresume ................................. False
|
| 42394 |
+
adlr_autoresume_interval ........................ 1000
|
| 42395 |
+
align_grad_reduce ............................... True
|
| 42396 |
+
align_param_gather .............................. False
|
| 42397 |
+
app_tag_run_name ................................ None
|
| 42398 |
+
app_tag_run_version ............................. 0.0.0
|
| 42399 |
+
apply_layernorm_1p .............................. False
|
| 42400 |
+
apply_query_key_layer_scaling ................... False
|
| 42401 |
+
apply_residual_connection_post_layernorm ........ False
|
| 42402 |
+
apply_rope_fusion ............................... False
|
| 42403 |
+
async_save ...................................... None
|
| 42404 |
+
async_tensor_model_parallel_allreduce ........... True
|
| 42405 |
+
attention_backend ............................... AttnBackend.auto
|
| 42406 |
+
attention_dropout ............................... 0.1
|
| 42407 |
+
attention_softmax_in_fp32 ....................... False
|
| 42408 |
+
auto_detect_ckpt_format ......................... False
|
| 42409 |
+
barrier_with_L1_time ............................ True
|
| 42410 |
+
bert_binary_head ................................ True
|
| 42411 |
+
bert_embedder_type .............................. megatron
|
| 42412 |
+
bert_load ....................................... None
|
| 42413 |
+
bf16 ............................................ False
|
| 42414 |
+
bias_dropout_fusion ............................. True
|
| 42415 |
+
bias_gelu_fusion ................................ True
|
| 42416 |
+
bias_swiglu_fusion .............................. True
|
| 42417 |
+
biencoder_projection_dim ........................ 0
|
| 42418 |
+
biencoder_shared_query_context_model ............ False
|
| 42419 |
+
block_data_path ................................. None
|
| 42420 |
+
calc_ft_timeouts ................................ False
|
| 42421 |
+
calculate_per_token_loss ........................ False
|
| 42422 |
+
check_for_large_grads ........................... False
|
| 42423 |
+
check_for_nan_in_loss_and_grad .................. False
|
| 42424 |
+
check_for_spiky_loss ............................ False
|
| 42425 |
+
check_weight_hash_across_dp_replicas_interval ... None
|
| 42426 |
+
ckpt_assume_constant_structure .................. False
|
| 42427 |
+
ckpt_convert_format ............................. None
|
| 42428 |
+
ckpt_convert_save ............................... None
|
| 42429 |
+
ckpt_convert_update_legacy_dist_opt_format ...... False
|
| 42430 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 42431 |
+
ckpt_format ..................................... torch_dist
|
| 42432 |
+
ckpt_fully_parallel_load ........................ False
|
| 42433 |
+
ckpt_fully_parallel_save ........................ True
|
| 42434 |
+
ckpt_fully_parallel_save_deprecated ............. False
|
| 42435 |
+
ckpt_step ....................................... None
|
| 42436 |
+
classes_fraction ................................ 1.0
|
| 42437 |
+
clip_grad ....................................... 1.0
|
| 42438 |
+
clone_scatter_output_in_embedding ............... True
|
| 42439 |
+
config_logger_dir ...............................
|
| 42440 |
+
consumed_train_samples .......................... 0
|
| 42441 |
+
consumed_valid_samples .......................... 0
|
| 42442 |
+
context_parallel_size ........................... 8
|
| 42443 |
+
cp_comm_type .................................... ['p2p']
|
| 42444 |
+
create_attention_mask_in_dataloader ............. True
|
| 42445 |
+
cross_entropy_fusion_impl ....................... native
|
| 42446 |
+
cross_entropy_loss_fusion ....................... False
|
| 42447 |
+
cuda_graph_scope ................................ full
|
| 42448 |
+
cuda_graph_warmup_steps ......................... 3
|
| 42449 |
+
data_args_path .................................. None
|
| 42450 |
+
data_cache_path ................................. None
|
| 42451 |
+
data_parallel_random_init ....................... False
|
| 42452 |
+
data_parallel_sharding_strategy ................. no_shard
|
| 42453 |
+
data_parallel_size .............................. 1
|
| 42454 |
+
data_path ....................................... None
|
| 42455 |
+
data_per_class_fraction ......................... 1.0
|
| 42456 |
+
data_sharding ................................... True
|
| 42457 |
+
dataloader_type ................................. single
|
| 42458 |
+
ddp_average_in_collective ....................... False
|
| 42459 |
+
ddp_bucket_size ................................. None
|
| 42460 |
+
ddp_num_buckets ................................. None
|
| 42461 |
+
ddp_pad_buckets_for_high_nccl_busbw ............. False
|
| 42462 |
+
decoder_first_pipeline_num_layers ............... None
|
| 42463 |
+
decoder_last_pipeline_num_layers ................ None
|
| 42464 |
+
decoder_num_layers .............................. None
|
| 42465 |
+
decoder_seq_length .............................. None
|
| 42466 |
+
decoupled_lr .................................... None
|
| 42467 |
+
decoupled_min_lr ................................ None
|
| 42468 |
+
decrease_batch_size_if_needed ................... False
|
| 42469 |
+
defer_embedding_wgrad_compute ................... False
|
| 42470 |
+
deprecated_use_mcore_models ..................... False
|
| 42471 |
+
deterministic_mode .............................. False
|
| 42472 |
+
dino_bottleneck_size ............................ 256
|
| 42473 |
+
dino_freeze_last_layer .......................... 1
|
| 42474 |
+
dino_head_hidden_size ........................... 2048
|
| 42475 |
+
dino_local_crops_number ......................... 10
|
| 42476 |
+
dino_local_img_size ............................. 96
|
| 42477 |
+
dino_norm_last_layer ............................ False
|
| 42478 |
+
dino_teacher_temp ............................... 0.07
|
| 42479 |
+
dino_warmup_teacher_temp ........................ 0.04
|
| 42480 |
+
dino_warmup_teacher_temp_epochs ................. 30
|
| 42481 |
+
disable_bf16_reduced_precision_matmul ........... False
|
| 42482 |
+
disable_mamba_mem_eff_path ...................... False
|
| 42483 |
+
disable_straggler_on_startup .................... False
|
| 42484 |
+
dist_ckpt_format_deprecated ..................... None
|
| 42485 |
+
dist_ckpt_strictness ............................ assume_ok_unexpected
|
| 42486 |
+
distribute_saved_activations .................... False
|
| 42487 |
+
distributed_backend ............................. nccl
|
| 42488 |
+
distributed_timeout_minutes ..................... 10
|
| 42489 |
+
embedding_path .................................. None
|
| 42490 |
+
empty_unused_memory_level ....................... 0
|
| 42491 |
+
enable_cuda_graph ............................... False
|
| 42492 |
+
enable_ft_package ............................... False
|
| 42493 |
+
enable_gloo_process_groups ...................... True
|
| 42494 |
+
enable_msc ...................................... True
|
| 42495 |
+
enable_one_logger ............................... True
|
| 42496 |
+
encoder_num_layers .............................. 2
|
| 42497 |
+
encoder_pipeline_model_parallel_size ............ 0
|
| 42498 |
+
encoder_seq_length .............................. 131072
|
| 42499 |
+
encoder_tensor_model_parallel_size .............. 0
|
| 42500 |
+
end_weight_decay ................................ 0.1
|
| 42501 |
+
eod_mask_loss ................................... False
|
| 42502 |
+
error_injection_rate ............................ 0
|
| 42503 |
+
error_injection_type ............................ transient_error
|
| 42504 |
+
eval_interval ................................... 16
|
| 42505 |
+
eval_iters ...................................... 1
|
| 42506 |
+
evidence_data_path .............................. None
|
| 42507 |
+
exit_duration_in_mins ........................... None
|
| 42508 |
+
exit_interval ................................... None
|
| 42509 |
+
exit_on_missing_checkpoint ...................... False
|
| 42510 |
+
exit_signal_handler ............................. False
|
| 42511 |
+
exp_avg_dtype ................................... torch.float32
|
| 42512 |
+
exp_avg_sq_dtype ................................ torch.float32
|
| 42513 |
+
expert_model_parallel_size ...................... 1
|
| 42514 |
+
expert_tensor_parallel_size ..................... 4
|
| 42515 |
+
external_cuda_graph ............................. False
|
| 42516 |
+
ffn_hidden_size ................................. 16384
|
| 42517 |
+
finetune ........................................ False
|
| 42518 |
+
first_last_layers_bf16 .......................... False
|
| 42519 |
+
flash_decode .................................... False
|
| 42520 |
+
fp16 ............................................ True
|
| 42521 |
+
fp16_lm_cross_entropy ........................... False
|
| 42522 |
+
fp32_residual_connection ........................ False
|
| 42523 |
+
fp8 ............................................. None
|
| 42524 |
+
fp8_amax_compute_algo ........................... most_recent
|
| 42525 |
+
fp8_amax_history_len ............................ 1
|
| 42526 |
+
fp8_interval .................................... 1
|
| 42527 |
+
fp8_margin ...................................... 0
|
| 42528 |
+
fp8_param_gather ................................ False
|
| 42529 |
+
fp8_recipe ...................................... delayed
|
| 42530 |
+
fp8_wgrad ....................................... True
|
| 42531 |
+
fsdp_double_buffer .............................. False
|
| 42532 |
+
global_batch_size ............................... 1
|
| 42533 |
+
grad_reduce_in_bf16 ............................. False
|
| 42534 |
+
gradient_accumulation_fusion .................... True
|
| 42535 |
+
gradient_reduce_div_fusion ...................... True
|
| 42536 |
+
group_query_attention ........................... True
|
| 42537 |
+
head_lr_mult .................................... 1.0
|
| 42538 |
+
heterogeneous_layers_config_encoded_json ........ None
|
| 42539 |
+
heterogeneous_layers_config_path ................ None
|
| 42540 |
+
hidden_dropout .................................. 0.1
|
| 42541 |
+
hidden_size ..................................... 4096
|
| 42542 |
+
hierarchical_context_parallel_sizes ............. None
|
| 42543 |
+
high_priority_stream_groups ..................... []
|
| 42544 |
+
hybrid_attention_ratio .......................... 0.0
|
| 42545 |
+
hybrid_mlp_ratio ................................ 0.0
|
| 42546 |
+
hybrid_override_pattern ......................... None
|
| 42547 |
+
hysteresis ...................................... 2
|
| 42548 |
+
ict_head_size ................................... None
|
| 42549 |
+
ict_load ........................................ None
|
| 42550 |
+
img_h ........................................... 224
|
| 42551 |
+
img_w ........................................... 224
|
| 42552 |
+
indexer_batch_size .............................. 128
|
| 42553 |
+
indexer_log_interval ............................ 1000
|
| 42554 |
+
inference_batch_times_seqlen_threshold .......... -1
|
| 42555 |
+
inference_dynamic_batching ...................... False
|
| 42556 |
+
inference_dynamic_batching_buffer_guaranteed_fraction 0.2
|
| 42557 |
+
inference_dynamic_batching_buffer_overflow_factor None
|
| 42558 |
+
inference_dynamic_batching_buffer_size_gb ....... 40.0
|
| 42559 |
+
inference_dynamic_batching_chunk_size ........... 256
|
| 42560 |
+
inference_dynamic_batching_max_requests_override None
|
| 42561 |
+
inference_dynamic_batching_max_tokens_override .. None
|
| 42562 |
+
inference_max_batch_size ........................ 8
|
| 42563 |
+
inference_max_seq_length ........................ 2560
|
| 42564 |
+
inference_rng_tracker ........................... False
|
| 42565 |
+
init_method_std ................................. 0.02
|
| 42566 |
+
init_method_xavier_uniform ...................... False
|
| 42567 |
+
init_model_with_meta_device ..................... False
|
| 42568 |
+
initial_loss_scale .............................. 4294967296
|
| 42569 |
+
inprocess_active_world_size ..................... 32
|
| 42570 |
+
inprocess_barrier_timeout ....................... 120
|
| 42571 |
+
inprocess_completion_timeout .................... 120
|
| 42572 |
+
inprocess_empty_cuda_cache ...................... False
|
| 42573 |
+
inprocess_granularity ........................... node
|
| 42574 |
+
inprocess_hard_timeout .......................... 90
|
| 42575 |
+
inprocess_heartbeat_interval .................... 30
|
| 42576 |
+
inprocess_heartbeat_timeout ..................... 60
|
| 42577 |
+
inprocess_last_call_wait ........................ 1
|
| 42578 |
+
inprocess_max_iterations ........................ None
|
| 42579 |
+
inprocess_monitor_process_interval .............. 1.0
|
| 42580 |
+
inprocess_monitor_thread_interval ............... 1.0
|
| 42581 |
+
inprocess_progress_watchdog_interval ............ 1.0
|
| 42582 |
+
inprocess_restart ............................... False
|
| 42583 |
+
inprocess_soft_timeout .......................... 60
|
| 42584 |
+
inprocess_termination_grace_time ................ 1
|
| 42585 |
+
is_hybrid_model ................................. False
|
| 42586 |
+
iter_per_epoch .................................. 1250
|
| 42587 |
+
iterations_to_skip .............................. []
|
| 42588 |
+
keep_fp8_transpose_cache_when_using_custom_fsdp . False
|
| 42589 |
+
kv_channels ..................................... 64
|
| 42590 |
+
kv_lora_rank .................................... 32
|
| 42591 |
+
lazy_mpu_init ................................... None
|
| 42592 |
+
load ............................................ gpt-checkpoint
|
| 42593 |
+
load_model_opt_format ........................... False
|
| 42594 |
+
local_rank ...................................... 0
|
| 42595 |
+
log_interval .................................... 1
|
| 42596 |
+
log_loss_scale_to_tensorboard ................... True
|
| 42597 |
+
log_memory_to_tensorboard ....................... False
|
| 42598 |
+
log_num_zeros_in_grad ........................... False
|
| 42599 |
+
log_params_norm ................................. False
|
| 42600 |
+
log_progress .................................... False
|
| 42601 |
+
log_straggler ................................... False
|
| 42602 |
+
log_throughput .................................. False
|
| 42603 |
+
log_timers_to_tensorboard ....................... False
|
| 42604 |
+
log_validation_ppl_to_tensorboard ............... False
|
| 42605 |
+
log_world_size_to_tensorboard ................... False
|
| 42606 |
+
logging_level ................................... 0
|
| 42607 |
+
loss_scale ...................................... None
|
| 42608 |
+
loss_scale_window ............................... 1000
|
| 42609 |
+
lr .............................................. 0.0005
|
| 42610 |
+
lr_decay_iters .................................. 150000
|
| 42611 |
+
lr_decay_samples ................................ None
|
| 42612 |
+
lr_decay_style .................................. cosine
|
| 42613 |
+
lr_warmup_fraction .............................. None
|
| 42614 |
+
lr_warmup_init .................................. 0.0
|
| 42615 |
+
lr_warmup_iters ................................. 2
|
| 42616 |
+
lr_warmup_samples ............................... 0
|
| 42617 |
+
lr_wsd_decay_iters .............................. None
|
| 42618 |
+
lr_wsd_decay_samples ............................ None
|
| 42619 |
+
lr_wsd_decay_style .............................. exponential
|
| 42620 |
+
main_grads_dtype ................................ torch.float32
|
| 42621 |
+
main_params_dtype ............................... torch.float32
|
| 42622 |
+
make_vocab_size_divisible_by .................... 128
|
| 42623 |
+
mamba_head_dim .................................. 64
|
| 42624 |
+
mamba_num_groups ................................ 8
|
| 42625 |
+
mamba_num_heads ................................. None
|
| 42626 |
+
mamba_state_dim ................................. 128
|
| 42627 |
+
manual_gc ....................................... False
|
| 42628 |
+
manual_gc_eval .................................. True
|
| 42629 |
+
manual_gc_interval .............................. 0
|
| 42630 |
+
mask_factor ..................................... 1.0
|
| 42631 |
+
mask_prob ....................................... 0.15
|
| 42632 |
+
mask_type ....................................... random
|
| 42633 |
+
masked_softmax_fusion ........................... True
|
| 42634 |
+
max_position_embeddings ......................... 131072
|
| 42635 |
+
max_tokens_to_oom ............................... 12000
|
| 42636 |
+
memory_snapshot_path ............................ snapshot.pickle
|
| 42637 |
+
merge_file ...................................... merges.txt
|
| 42638 |
+
micro_batch_size ................................ 1
|
| 42639 |
+
microbatch_group_size_per_vp_stage .............. None
|
| 42640 |
+
mid_level_dataset_surplus ....................... 0.005
|
| 42641 |
+
min_loss_scale .................................. 1.0
|
| 42642 |
+
min_lr .......................................... 0.0
|
| 42643 |
+
mlp_chunks_for_prefill .......................... 1
|
| 42644 |
+
mmap_bin_files .................................. True
|
| 42645 |
+
mock_data ....................................... True
|
| 42646 |
+
moe_apply_probs_on_input ........................ False
|
| 42647 |
+
moe_aux_loss_coeff .............................. 0.0
|
| 42648 |
+
moe_enable_deepep ............................... False
|
| 42649 |
+
moe_expert_capacity_factor ...................... None
|
| 42650 |
+
moe_extended_tp ................................. False
|
| 42651 |
+
moe_ffn_hidden_size ............................. None
|
| 42652 |
+
moe_grouped_gemm ................................ False
|
| 42653 |
+
moe_input_jitter_eps ............................ None
|
| 42654 |
+
moe_layer_freq .................................. 1
|
| 42655 |
+
moe_layer_recompute ............................. False
|
| 42656 |
+
moe_pad_expert_input_to_capacity ................ False
|
| 42657 |
+
moe_per_layer_logging ........................... False
|
| 42658 |
+
moe_permute_fusion .............................. False
|
| 42659 |
+
moe_router_bias_update_rate ..................... 0.001
|
| 42660 |
+
moe_router_dtype ................................ None
|
| 42661 |
+
moe_router_enable_expert_bias ................... False
|
| 42662 |
+
moe_router_force_load_balancing ................. False
|
| 42663 |
+
moe_router_group_topk ........................... None
|
| 42664 |
+
moe_router_load_balancing_type .................. aux_loss
|
| 42665 |
+
moe_router_num_groups ........................... None
|
| 42666 |
+
moe_router_padding_for_fp8 ...................... False
|
| 42667 |
+
moe_router_pre_softmax .......................... False
|
| 42668 |
+
moe_router_score_function ....................... softmax
|
| 42669 |
+
moe_router_topk ................................. 2
|
| 42670 |
+
moe_router_topk_scaling_factor .................. None
|
| 42671 |
+
moe_shared_expert_intermediate_size ............. None
|
| 42672 |
+
moe_shared_expert_overlap ....................... False
|
| 42673 |
+
moe_token_dispatcher_type ....................... allgather
|
| 42674 |
+
moe_token_drop_policy ........................... probs
|
| 42675 |
+
moe_use_legacy_grouped_gemm ..................... False
|
| 42676 |
+
moe_use_upcycling ............................... False
|
| 42677 |
+
moe_z_loss_coeff ................................ None
|
| 42678 |
+
mrope_section ................................... None
|
| 42679 |
+
mscale .......................................... 1.0
|
| 42680 |
+
mscale_all_dim .................................. 1.0
|
| 42681 |
+
mtp_loss_scaling_factor ......................... 0.1
|
| 42682 |
+
mtp_num_layers .................................. None
|
| 42683 |
+
multi_latent_attention .......................... False
|
| 42684 |
+
nccl_all_reduce_for_prefill ..................... False
|
| 42685 |
+
nccl_communicator_config_path ................... None
|
| 42686 |
+
nccl_ub ......................................... False
|
| 42687 |
+
no_load_optim ................................... None
|
| 42688 |
+
no_load_rng ..................................... None
|
| 42689 |
+
no_persist_layer_norm ........................... False
|
| 42690 |
+
no_rope_freq .................................... None
|
| 42691 |
+
no_save_optim ................................... None
|
| 42692 |
+
no_save_rng ..................................... None
|
| 42693 |
+
non_persistent_ckpt_type ........................ None
|
| 42694 |
+
non_persistent_global_ckpt_dir .................. None
|
| 42695 |
+
non_persistent_local_ckpt_algo .................. fully_parallel
|
| 42696 |
+
non_persistent_local_ckpt_dir ................... None
|
| 42697 |
+
non_persistent_save_interval .................... None
|
| 42698 |
+
norm_epsilon .................................... 1e-05
|
| 42699 |
+
normalization ................................... LayerNorm
|
| 42700 |
+
num_attention_heads ............................. 64
|
| 42701 |
+
num_channels .................................... 3
|
| 42702 |
+
num_classes ..................................... 1000
|
| 42703 |
+
num_dataset_builder_threads ..................... 1
|
| 42704 |
+
num_distributed_optimizer_instances ............. 1
|
| 42705 |
+
num_experts ..................................... None
|
| 42706 |
+
num_layers ...................................... 2
|
| 42707 |
+
num_layers_at_end_in_bf16 ....................... 1
|
| 42708 |
+
num_layers_at_start_in_bf16 ..................... 1
|
| 42709 |
+
num_layers_per_virtual_pipeline_stage ........... None
|
| 42710 |
+
num_query_groups ................................ 16
|
| 42711 |
+
num_virtual_stages_per_pipeline_rank ............ None
|
| 42712 |
+
num_workers ..................................... 2
|
| 42713 |
+
object_storage_cache_path ....................... None
|
| 42714 |
+
one_logger_async ................................ False
|
| 42715 |
+
one_logger_project .............................. megatron-lm
|
| 42716 |
+
one_logger_run_name ............................. None
|
| 42717 |
+
onnx_safe ....................................... None
|
| 42718 |
+
openai_gelu ..................................... False
|
| 42719 |
+
optimizer ....................................... adam
|
| 42720 |
+
optimizer_cpu_offload ........................... False
|
| 42721 |
+
optimizer_offload_fraction ...................... 1.0
|
| 42722 |
+
output_bert_embeddings .......................... False
|
| 42723 |
+
overlap_cpu_optimizer_d2h_h2d ................... False
|
| 42724 |
+
overlap_grad_reduce ............................. False
|
| 42725 |
+
overlap_p2p_comm ................................ False
|
| 42726 |
+
overlap_p2p_comm_warmup_flush ................... False
|
| 42727 |
+
overlap_param_gather ............................ False
|
| 42728 |
+
overlap_param_gather_with_optimizer_step ........ False
|
| 42729 |
+
override_opt_param_scheduler .................... False
|
| 42730 |
+
params_dtype .................................... torch.float16
|
| 42731 |
+
patch_dim ....................................... 16
|
| 42732 |
+
per_split_data_args_path ........................ None
|
| 42733 |
+
perform_initialization .......................... True
|
| 42734 |
+
pin_cpu_grads ................................... True
|
| 42735 |
+
pin_cpu_params .................................. True
|
| 42736 |
+
pipeline_model_parallel_comm_backend ............ None
|
| 42737 |
+
pipeline_model_parallel_size .................... 1
|
| 42738 |
+
pipeline_model_parallel_split_rank .............. None
|
| 42739 |
+
position_embedding_type ......................... learned_absolute
|
| 42740 |
+
pretrained_checkpoint ........................... None
|
| 42741 |
+
profile ......................................... False
|
| 42742 |
+
profile_ranks ................................... [0]
|
| 42743 |
+
profile_step_end ................................ 12
|
| 42744 |
+
profile_step_start .............................. 10
|
| 42745 |
+
q_lora_rank ..................................... None
|
| 42746 |
+
qk_head_dim ..................................... 128
|
| 42747 |
+
qk_l2_norm ...................................... False
|
| 42748 |
+
qk_layernorm .................................... False
|
| 42749 |
+
qk_pos_emb_head_dim ............................. 64
|
| 42750 |
+
query_in_block_prob ............................. 0.1
|
| 42751 |
+
rampup_batch_size ............................... None
|
| 42752 |
+
rank ............................................ 0
|
| 42753 |
+
recompute_granularity ........................... None
|
| 42754 |
+
recompute_method ................................ None
|
| 42755 |
+
recompute_modules ............................... None
|
| 42756 |
+
recompute_num_layers ............................ None
|
| 42757 |
+
record_memory_history ........................... False
|
| 42758 |
+
relative_attention_max_distance ................. 128
|
| 42759 |
+
relative_attention_num_buckets .................. 32
|
| 42760 |
+
replication ..................................... False
|
| 42761 |
+
replication_factor .............................. 2
|
| 42762 |
+
replication_jump ................................ None
|
| 42763 |
+
rerun_mode ...................................... disabled
|
| 42764 |
+
reset_attention_mask ............................ False
|
| 42765 |
+
reset_position_ids .............................. False
|
| 42766 |
+
result_rejected_tracker_filename ................ None
|
| 42767 |
+
retriever_report_topk_accuracies ................ []
|
| 42768 |
+
retriever_score_scaling ......................... False
|
| 42769 |
+
retriever_seq_length ............................ 256
|
| 42770 |
+
retro_add_retriever ............................. False
|
| 42771 |
+
retro_attention_gate ............................ 1
|
| 42772 |
+
retro_cyclic_train_iters ........................ None
|
| 42773 |
+
retro_encoder_attention_dropout ................. 0.1
|
| 42774 |
+
retro_encoder_hidden_dropout .................... 0.1
|
| 42775 |
+
retro_encoder_layers ............................ 2
|
| 42776 |
+
retro_num_neighbors ............................. 2
|
| 42777 |
+
retro_num_retrieved_chunks ...................... 2
|
| 42778 |
+
retro_project_dir ............................... None
|
| 42779 |
+
retro_verify_neighbor_count ..................... True
|
| 42780 |
+
rope_scaling_factor ............................. 8.0
|
| 42781 |
+
rotary_base ..................................... 10000
|
| 42782 |
+
rotary_interleaved .............................. False
|
| 42783 |
+
rotary_percent .................................. 1.0
|
| 42784 |
+
rotary_scaling_factor ........................... 1.0
|
| 42785 |
+
rotary_seq_len_interpolation_factor ............. None
|
| 42786 |
+
run_workload_inspector_server ................... False
|
| 42787 |
+
sample_rate ..................................... 1.0
|
| 42788 |
+
save ............................................ gpt-checkpoint
|
| 42789 |
+
save_interval ................................... 16
|
| 42790 |
+
scatter_gather_tensors_in_pipeline .............. True
|
| 42791 |
+
seed ............................................ 1234
|
| 42792 |
+
seq_length ...................................... 131072
|
| 42793 |
+
sequence_parallel ............................... False
|
| 42794 |
+
sgd_momentum .................................... 0.9
|
| 42795 |
+
short_seq_prob .................................. 0.1
|
| 42796 |
+
skip_train ...................................... False
|
| 42797 |
+
skipped_train_samples ........................... 0
|
| 42798 |
+
spec ............................................ None
|
| 42799 |
+
split ........................................... None
|
| 42800 |
+
squared_relu .................................... False
|
| 42801 |
+
start_weight_decay .............................. 0.1
|
| 42802 |
+
straggler_ctrlr_port ............................ 65535
|
| 42803 |
+
straggler_minmax_count .......................... 1
|
| 42804 |
+
suggested_communication_unit_size ............... None
|
| 42805 |
+
swiglu .......................................... False
|
| 42806 |
+
swin_backbone_type .............................. tiny
|
| 42807 |
+
symmetric_ar_type ............................... None
|
| 42808 |
+
te_rng_tracker .................................. False
|
| 42809 |
+
tensor_model_parallel_size ...................... 4
|
| 42810 |
+
tensorboard_dir ................................. tensorboard-logs/
|
| 42811 |
+
tensorboard_log_interval ........................ 1
|
| 42812 |
+
tensorboard_queue_size .......................... 1000
|
| 42813 |
+
test_data_path .................................. None
|
| 42814 |
+
test_mode ....................................... False
|
| 42815 |
+
tiktoken_num_special_tokens ..................... 1000
|
| 42816 |
+
tiktoken_pattern ................................ None
|
| 42817 |
+
tiktoken_special_tokens ......................... None
|
| 42818 |
+
timing_log_level ................................ 0
|
| 42819 |
+
timing_log_option ............................... minmax
|
| 42820 |
+
titles_data_path ................................ None
|
| 42821 |
+
tokenizer_model ................................. None
|
| 42822 |
+
tokenizer_type .................................. GPT2BPETokenizer
|
| 42823 |
+
torch_fsdp2_reshard_after_forward ............... True
|
| 42824 |
+
tp_comm_bootstrap_backend ....................... nccl
|
| 42825 |
+
tp_comm_bulk_dgrad .............................. True
|
| 42826 |
+
tp_comm_bulk_wgrad .............................. True
|
| 42827 |
+
tp_comm_overlap ................................. False
|
| 42828 |
+
tp_comm_overlap_ag .............................. True
|
| 42829 |
+
tp_comm_overlap_cfg ............................. None
|
| 42830 |
+
tp_comm_overlap_rs .............................. True
|
| 42831 |
+
tp_comm_overlap_rs_dgrad ........................ False
|
| 42832 |
+
tp_comm_split_ag ................................ True
|
| 42833 |
+
tp_comm_split_rs ................................ True
|
| 42834 |
+
train_data_path ................................. None
|
| 42835 |
+
train_iters ..................................... 10
|
| 42836 |
+
train_samples ................................... None
|
| 42837 |
+
train_sync_interval ............................. None
|
| 42838 |
+
transformer_impl ................................ transformer_engine
|
| 42839 |
+
transformer_pipeline_model_parallel_size ........ 1
|
| 42840 |
+
untie_embeddings_and_output_weights ............. False
|
| 42841 |
+
use_checkpoint_args ............................. False
|
| 42842 |
+
use_checkpoint_opt_param_scheduler .............. False
|
| 42843 |
+
use_cpu_initialization .......................... None
|
| 42844 |
+
use_custom_fsdp ................................. False
|
| 42845 |
+
use_dist_ckpt ................................... True
|
| 42846 |
+
use_dist_ckpt_deprecated ........................ False
|
| 42847 |
+
use_distributed_optimizer ....................... False
|
| 42848 |
+
use_flash_attn .................................. False
|
| 42849 |
+
use_legacy_models ............................... False
|
| 42850 |
+
use_mp_args_from_checkpoint_args ................ False
|
| 42851 |
+
use_one_sent_docs ............................... False
|
| 42852 |
+
use_persistent_ckpt_worker ...................... False
|
| 42853 |
+
use_precision_aware_optimizer ................... False
|
| 42854 |
+
use_pytorch_profiler ............................ False
|
| 42855 |
+
use_ring_exchange_p2p ........................... False
|
| 42856 |
+
use_rope_scaling ................................ False
|
| 42857 |
+
use_rotary_position_embeddings .................. False
|
| 42858 |
+
use_sharp ....................................... False
|
| 42859 |
+
use_tokenizer_model_from_checkpoint_args ........ True
|
| 42860 |
+
use_torch_fsdp2 ................................. False
|
| 42861 |
+
use_torch_optimizer_for_cpu_offload ............. False
|
| 42862 |
+
use_tp_pp_dp_mapping ............................ False
|
| 42863 |
+
v_head_dim ...................................... 128
|
| 42864 |
+
valid_data_path ................................. None
|
| 42865 |
+
variable_seq_lengths ............................ False
|
| 42866 |
+
virtual_pipeline_model_parallel_size ............ None
|
| 42867 |
+
vision_backbone_type ............................ vit
|
| 42868 |
+
vision_pretraining .............................. False
|
| 42869 |
+
vision_pretraining_type ......................... classify
|
| 42870 |
+
vocab_extra_ids ................................. 0
|
| 42871 |
+
vocab_file ...................................... vocab.json
|
| 42872 |
+
vocab_size ...................................... None
|
| 42873 |
+
wandb_exp_name ..................................
|
| 42874 |
+
wandb_project ...................................
|
| 42875 |
+
wandb_save_dir ..................................
|
| 42876 |
+
weight_decay .................................... 0.1
|
| 42877 |
+
weight_decay_incr_style ......................... constant
|
| 42878 |
+
wgrad_deferral_limit ............................ 0
|
| 42879 |
+
world_size ...................................... 32
|
| 42880 |
+
yaml_cfg ........................................ None
|
| 42881 |
+
-------------------- end of arguments ---------------------
|
| 42882 |
+
INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1
|
| 42883 |
+
> building GPT2BPETokenizer tokenizer ...
|
| 42884 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 42885 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 42886 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 42887 |
+
> padded vocab (size: 50257) with 431 dummy tokens (new size: 50688)
|
| 42888 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 42889 |
+
WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED
|
| 42890 |
+
> initializing torch distributed ...
|
| 42891 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 42892 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 42893 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 42894 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 42895 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 42896 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 42897 |
+
> initialized tensor model parallel with size 4
|
| 42898 |
+
> initialized pipeline model parallel with size 1
|
| 42899 |
+
> setting random seeds to 1234 ...
|
| 42900 |
+
> compiling dataset index builder ...
|
| 42901 |
+
make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets'
|
| 42902 |
+
make: Nothing to be done for 'default'.
|
| 42903 |
+
make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets'
|
| 42904 |
+
>>> done with dataset index builder. Compilation time: 0.055 seconds
|
| 42905 |
+
WARNING: constraints for invoking optimized fused softmax kernel are not met. We default back to unfused kernel invocations.
|
| 42906 |
+
> compiling and loading fused kernels ...
|
| 42907 |
+
>>> done with compiling and loading fused kernels. Compilation time: 7.133 seconds
|
| 42908 |
+
time to initialize megatron (seconds): 14.391
|
| 42909 |
+
[after megatron is initialized] datetime: 2025-06-21 22:08:43
|
| 42910 |
+
building GPT model ...
|
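The "padded vocab" line above reflects Megatron-LM rounding the GPT2BPETokenizer vocabulary up to a multiple of make-vocab-size-divisible-by times the tensor-parallel size. With tensor_model_parallel_size 4 (from the argument dump) and a divisor of 128 (the usual default; that argument is not shown in this excerpt, so it is an assumption), the multiple is 512 and 50257 rounds up to 50688, i.e. 431 dummy tokens. A minimal sketch of that arithmetic, not the library's code:

def pad_vocab_size(orig_vocab_size: int,
                   make_vocab_size_divisible_by: int = 128,   # assumed default
                   tensor_model_parallel_size: int = 4) -> int:
    # Round the vocabulary up to the nearest multiple of
    # make_vocab_size_divisible_by * tensor_model_parallel_size.
    multiple = make_vocab_size_divisible_by * tensor_model_parallel_size
    return ((orig_vocab_size + multiple - 1) // multiple) * multiple

padded = pad_vocab_size(50257)     # 50688
print(padded, padded - 50257)      # 50688 431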
attnserver.run_attnserver.slurm.sh.343215.err.log
CHANGED
|
@@ -1,3 +1,3 @@
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:
|
| 3 |
-
size
|
|
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d442aa3aa3698c78a8119623862452f4effb5ca97c90181b3013cbcebe4bb6e1
|
| 3 |
+
size 30251346
|
attnserver.run_attnserver.slurm.sh.343215.out.log
CHANGED
|
The diff for this file is too large to render.
See raw diff
|
|
|
attnserver.run_attnserver.slurm.sh.343225.out.log
CHANGED
|
@@ -22628,3 +22628,19 @@ batch tensor after cp: labels torch.Size([1, 65536])
|
|
| 22628 |
batch tensor after cp: loss_mask torch.Size([1, 65536])
|
| 22629 |
batch tensor after cp: attention_mask torch.Size([1, 1, 65536, 131072])
|
| 22630 |
batch tensor after cp: position_ids torch.Size([1, 65536])
|
| 22631 |
+
Start exporting trace 0
|
| 22632 |
+
Done exporting trace 0
|
| 22633 |
+
[2025-06-21 22:07:40] iteration 1/ 10 | consumed samples: 1 | elapsed time per iteration (ms): 58675.3 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 4294967296.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
|
| 22634 |
+
Number of parameters in transformer block in billions: 0.35
|
| 22635 |
+
Number of parameters in embedding layers in billions: 0.21
|
| 22636 |
+
Total number of parameters in billions: 0.56
|
| 22637 |
+
Number of parameters in most loaded shard in billions: 0.1400
|
| 22638 |
+
Theoretical memory footprints: weight and optimizer=2403.18 MB
|
| 22639 |
+
[Rank 2] (after 1 iterations) memory (MB) | allocated: 32477.04150390625 | max allocated: 43663.65673828125 | reserved: 45306.0 | max reserved: 45306.0
|
| 22640 |
+
[Rank 3] (after 1 iterations) memory (MB) | allocated: 32477.04150390625 | max allocated: 43663.65673828125 | reserved: 45306.0 | max reserved: 45306.0
|
| 22641 |
+
[Rank 0] (after 1 iterations) memory (MB) | allocated: 32477.04150390625 | max allocated: 43663.65673828125 | reserved: 45306.0 | max reserved: 45306.0
|
| 22642 |
+
[Rank 6] (after 1 iterations) memory (MB) | allocated: 32477.04150390625 | max allocated: 43663.65673828125 | reserved: 45498.0 | max reserved: 45498.0
|
| 22643 |
+
[Rank 1] (after 1 iterations) memory (MB) | allocated: 32477.04150390625 | max allocated: 43663.65673828125 | reserved: 45818.0 | max reserved: 45818.0
|
| 22644 |
+
[Rank 7] (after 1 iterations) memory (MB) | allocated: 32477.04150390625 | max allocated: 43663.65673828125 | reserved: 45882.0 | max reserved: 45882.0[Rank 5] (after 1 iterations) memory (MB) | allocated: 32477.04150390625 | max allocated: 43663.65673828125 | reserved: 45882.0 | max reserved: 45882.0
|
| 22645 |
+
|
| 22646 |
+
[Rank 4] (after 1 iterations) memory (MB) | allocated: 32477.04150390625 | max allocated: 43663.65673828125 | reserved: 45882.0 | max reserved: 45882.0
|
attnserver.run_attnserver.slurm.sh.343226.out.log
CHANGED
|
@@ -19110,3 +19110,86 @@ batch tensor after cp: labels torch.Size([2, 81920])
|
|
| 19110 |
batch tensor after cp: loss_mask torch.Size([2, 81920])
|
| 19111 |
batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
|
| 19112 |
batch tensor after cp: position_ids torch.Size([2, 81920])
|
| 19113 |
+
batch tensor: tokens torch.Size([2, 163840])
|
| 19114 |
+
batch tensor: labels torch.Size([2, 163840])
|
| 19115 |
+
batch tensor: loss_mask torch.Size([2, 163840])
|
| 19116 |
+
batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
|
| 19117 |
+
batch tensor: position_ids torch.Size([2, 163840])
|
| 19118 |
+
batch tensor after cp: tokens torch.Size([2, 81920])
|
| 19119 |
+
batch tensor after cp: labels torch.Size([2, 81920])
|
| 19120 |
+
batch tensor after cp: loss_mask torch.Size([2, 81920])
|
| 19121 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
|
| 19122 |
+
batch tensor after cp: position_ids torch.Size([2, 81920])
|
| 19123 |
+
batch tensor: tokens torch.Size([2, 163840])
|
| 19124 |
+
batch tensor: labels torch.Size([2, 163840])
|
| 19125 |
+
batch tensor: loss_mask torch.Size([2, 163840])
|
| 19126 |
+
batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
|
| 19127 |
+
batch tensor: position_ids torch.Size([2, 163840])
|
| 19128 |
+
batch tensor after cp: tokens torch.Size([2, 81920])
|
| 19129 |
+
batch tensor after cp: labels torch.Size([2, 81920])
|
| 19130 |
+
batch tensor after cp: loss_mask torch.Size([2, 81920])
|
| 19131 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
|
| 19132 |
+
batch tensor after cp: position_ids torch.Size([2, 81920])
|
| 19133 |
+
Start exporting trace 1
|
| 19134 |
+
Done exporting trace 1
|
| 19135 |
+
[2025-06-21 22:08:03] iteration 2/ 10 | consumed samples: 2 | elapsed time per iteration (ms): 89403.0 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 2147483648.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
|
| 19136 |
+
batch tensor: tokens torch.Size([2, 163840])
|
| 19137 |
+
batch tensor: labels torch.Size([2, 163840])
|
| 19138 |
+
batch tensor: loss_mask torch.Size([2, 163840])
|
| 19139 |
+
batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
|
| 19140 |
+
batch tensor: position_ids torch.Size([2, 163840])
|
| 19141 |
+
batch tensor after cp: tokens torch.Size([2, 81920])
|
| 19142 |
+
batch tensor after cp: labels torch.Size([2, 81920])
|
| 19143 |
+
batch tensor after cp: loss_mask torch.Size([2, 81920])
|
| 19144 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
|
| 19145 |
+
batch tensor after cp: position_ids torch.Size([2, 81920])
|
| 19146 |
+
batch tensor: tokens torch.Size([2, 163840])
|
| 19147 |
+
batch tensor: labels torch.Size([2, 163840])
|
| 19148 |
+
batch tensor: loss_mask torch.Size([2, 163840])
|
| 19149 |
+
batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
|
| 19150 |
+
batch tensor: position_ids torch.Size([2, 163840])
|
| 19151 |
+
batch tensor after cp: tokens torch.Size([2, 81920])
|
| 19152 |
+
batch tensor after cp: labels torch.Size([2, 81920])
|
| 19153 |
+
batch tensor after cp: loss_mask torch.Size([2, 81920])
|
| 19154 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
|
| 19155 |
+
batch tensor after cp: position_ids torch.Size([2, 81920])
|
| 19156 |
+
batch tensor: tokens torch.Size([2, 163840])
|
| 19157 |
+
batch tensor: labels torch.Size([2, 163840])
|
| 19158 |
+
batch tensor: loss_mask torch.Size([2, 163840])
|
| 19159 |
+
batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
|
| 19160 |
+
batch tensor: position_ids torch.Size([2, 163840])
|
| 19161 |
+
batch tensor after cp: tokens torch.Size([2, 81920])
|
| 19162 |
+
batch tensor after cp: labels torch.Size([2, 81920])
|
| 19163 |
+
batch tensor after cp: loss_mask torch.Size([2, 81920])
|
| 19164 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
|
| 19165 |
+
batch tensor after cp: position_ids torch.Size([2, 81920])
|
| 19166 |
+
batch tensor: tokens torch.Size([2, 163840])
|
| 19167 |
+
batch tensor: labels torch.Size([2, 163840])
|
| 19168 |
+
batch tensor: loss_mask torch.Size([2, 163840])
|
| 19169 |
+
batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
|
| 19170 |
+
batch tensor: position_ids torch.Size([2, 163840])
|
| 19171 |
+
batch tensor after cp: tokens torch.Size([2, 81920])
|
| 19172 |
+
batch tensor after cp: labels torch.Size([2, 81920])
|
| 19173 |
+
batch tensor after cp: loss_mask torch.Size([2, 81920])
|
| 19174 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
|
| 19175 |
+
batch tensor after cp: position_ids torch.Size([2, 81920])
|
| 19176 |
+
batch tensor: tokens torch.Size([2, 163840])
|
| 19177 |
+
batch tensor: labels torch.Size([2, 163840])
|
| 19178 |
+
batch tensor: loss_mask torch.Size([2, 163840])
|
| 19179 |
+
batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
|
| 19180 |
+
batch tensor: position_ids torch.Size([2, 163840])
|
| 19181 |
+
batch tensor after cp: tokens torch.Size([2, 81920])
|
| 19182 |
+
batch tensor after cp: labels torch.Size([2, 81920])
|
| 19183 |
+
batch tensor after cp: loss_mask torch.Size([2, 81920])
|
| 19184 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
|
| 19185 |
+
batch tensor after cp: position_ids torch.Size([2, 81920])
|
| 19186 |
+
batch tensor: tokens torch.Size([2, 163840])
|
| 19187 |
+
batch tensor: labels torch.Size([2, 163840])
|
| 19188 |
+
batch tensor: loss_mask torch.Size([2, 163840])
|
| 19189 |
+
batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
|
| 19190 |
+
batch tensor: position_ids torch.Size([2, 163840])
|
| 19191 |
+
batch tensor after cp: tokens torch.Size([2, 81920])
|
| 19192 |
+
batch tensor after cp: labels torch.Size([2, 81920])
|
| 19193 |
+
batch tensor after cp: loss_mask torch.Size([2, 81920])
|
| 19194 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
|
| 19195 |
+
batch tensor after cp: position_ids torch.Size([2, 81920])
|
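The iteration lines in these logs show the loss scale halving from one logged iteration to the next (4294967296, then 2147483648, then 1073741824), each time with "number of skipped iterations: 1". That pattern matches ordinary fp16 dynamic loss scaling: when a gradient overflow is detected the step is skipped and the scale is multiplied by a backoff factor of 0.5. The toy scaler below sketches that rule under those assumptions; it is not Megatron-LM's grad scaler.

class ToyLossScaler:
    def __init__(self, initial_scale=2.0 ** 32, backoff=0.5,
                 growth=2.0, growth_interval=1000):
        self.scale = initial_scale
        self.backoff = backoff
        self.growth = growth
        self.growth_interval = growth_interval
        self._good_steps = 0

    def update(self, found_overflow: bool):
        if found_overflow:
            self.scale *= self.backoff       # skip the step and halve the scale
            self._good_steps = 0
        else:
            self._good_steps += 1
            if self._good_steps % self.growth_interval == 0:
                self.scale *= self.growth    # grow back after enough clean steps

scaler = ToyLossScaler()
for _ in range(3):                           # three overflowing iterations
    scaler.update(found_overflow=True)
print(scaler.scale)                          # 536870912.0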
attnserver.run_attnserver.slurm.sh.343237.out.log
CHANGED
|
@@ -32688,3 +32688,317 @@ batch tensor after cp: attention_mask torch.Size([1, 1, 10240, 81920])
|
|
| 32688 |
batch tensor after cp: position_ids torch.Size([1, 10240])
|
| 32689 |
Start exporting trace 1
|
| 32690 |
Done exporting trace 1
|
| 32691 |
+
[2025-06-21 22:07:33] iteration 2/ 10 | consumed samples: 2 | elapsed time per iteration (ms): 74335.1 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 2147483648.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
|
| 32692 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 32693 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 32694 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 32695 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 32696 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 32697 |
+
batch tensor after cp: tokens torch.Size([1, 10240])
|
| 32698 |
+
batch tensor after cp: labels torch.Size([1, 10240])
|
| 32699 |
+
batch tensor after cp: loss_mask torch.Size([1, 10240])
|
| 32700 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 10240, 81920])
|
| 32701 |
+
batch tensor after cp: position_ids torch.Size([1, 10240])
|
| 32702 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 32703 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 32704 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 32705 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 32706 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 32707 |
+
batch tensor after cp: tokens torch.Size([1, 10240])
|
| 32708 |
+
batch tensor after cp: labels torch.Size([1, 10240])
|
| 32709 |
+
batch tensor after cp: loss_mask torch.Size([1, 10240])
|
| 32710 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 10240, 81920])
|
| 32711 |
+
batch tensor after cp: position_ids torch.Size([1, 10240])
|
| 32712 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 32713 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 32714 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 32715 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 32716 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 32717 |
+
batch tensor after cp: tokens torch.Size([1, 10240])
|
| 32718 |
+
batch tensor after cp: labels torch.Size([1, 10240])
|
| 32719 |
+
batch tensor after cp: loss_mask torch.Size([1, 10240])
|
| 32720 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 10240, 81920])
|
| 32721 |
+
batch tensor after cp: position_ids torch.Size([1, 10240])
|
| 32722 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 32723 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 32724 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 32725 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 32726 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 32727 |
+
batch tensor after cp: tokens torch.Size([1, 10240])
|
| 32728 |
+
batch tensor after cp: labels torch.Size([1, 10240])
|
| 32729 |
+
batch tensor after cp: loss_mask torch.Size([1, 10240])
|
| 32730 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 10240, 81920])
|
| 32731 |
+
batch tensor after cp: position_ids torch.Size([1, 10240])
|
| 32732 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 32733 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 32734 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 32735 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 32736 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 32737 |
+
batch tensor after cp: tokens torch.Size([1, 10240])
|
| 32738 |
+
batch tensor after cp: labels torch.Size([1, 10240])
|
| 32739 |
+
batch tensor after cp: loss_mask torch.Size([1, 10240])
|
| 32740 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 10240, 81920])
|
| 32741 |
+
batch tensor after cp: position_ids torch.Size([1, 10240])
|
| 32742 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 32743 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 32744 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 32745 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 32746 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 32747 |
+
batch tensor after cp: tokens torch.Size([1, 10240])
|
| 32748 |
+
batch tensor after cp: labels torch.Size([1, 10240])
|
| 32749 |
+
batch tensor after cp: loss_mask torch.Size([1, 10240])
|
| 32750 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 10240, 81920])
|
| 32751 |
+
batch tensor after cp: position_ids torch.Size([1, 10240])
|
| 32752 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 32753 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 32754 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 32755 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 32756 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 32757 |
+
batch tensor after cp: tokens torch.Size([1, 10240])
|
| 32758 |
+
batch tensor after cp: labels torch.Size([1, 10240])
|
| 32759 |
+
batch tensor after cp: loss_mask torch.Size([1, 10240])
|
| 32760 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 10240, 81920])
|
| 32761 |
+
batch tensor after cp: position_ids torch.Size([1, 10240])
|
| 32762 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 32763 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 32764 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 32765 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 32766 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 32767 |
+
batch tensor after cp: tokens torch.Size([1, 10240])
|
| 32768 |
+
batch tensor after cp: labels torch.Size([1, 10240])
|
| 32769 |
+
batch tensor after cp: loss_mask torch.Size([1, 10240])
|
| 32770 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 10240, 81920])
|
| 32771 |
+
batch tensor after cp: position_ids torch.Size([1, 10240])
|
| 32772 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 32773 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 32774 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 32775 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 32776 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 32777 |
+
batch tensor after cp: tokens torch.Size([1, 10240])
|
| 32778 |
+
batch tensor after cp: labels torch.Size([1, 10240])
|
| 32779 |
+
batch tensor after cp: loss_mask torch.Size([1, 10240])
|
| 32780 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 10240, 81920])
|
| 32781 |
+
batch tensor after cp: position_ids torch.Size([1, 10240])
|
| 32782 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 32783 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 32784 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 32785 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 32786 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 32787 |
+
batch tensor after cp: tokens torch.Size([1, 10240])
|
| 32788 |
+
batch tensor after cp: labels torch.Size([1, 10240])
|
| 32789 |
+
batch tensor after cp: loss_mask torch.Size([1, 10240])
|
| 32790 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 10240, 81920])
|
| 32791 |
+
batch tensor after cp: position_ids torch.Size([1, 10240])
|
| 32792 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 32793 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 32794 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 32795 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 32796 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 32797 |
+
batch tensor after cp: tokens torch.Size([1, 10240])
|
| 32798 |
+
batch tensor after cp: labels torch.Size([1, 10240])
|
| 32799 |
+
batch tensor after cp: loss_mask torch.Size([1, 10240])
|
| 32800 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 10240, 81920])
|
| 32801 |
+
batch tensor after cp: position_ids torch.Size([1, 10240])
|
| 32802 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 32803 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 32804 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 32805 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 32806 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 32807 |
+
batch tensor after cp: tokens torch.Size([1, 10240])
|
| 32808 |
+
batch tensor after cp: labels torch.Size([1, 10240])
|
| 32809 |
+
batch tensor after cp: loss_mask torch.Size([1, 10240])
|
| 32810 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 10240, 81920])
|
| 32811 |
+
batch tensor after cp: position_ids torch.Size([1, 10240])
|
| 32812 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 32813 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 32814 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 32815 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 32816 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 32817 |
+
batch tensor after cp: tokens torch.Size([1, 10240])
|
| 32818 |
+
batch tensor after cp: labels torch.Size([1, 10240])
|
| 32819 |
+
batch tensor after cp: loss_mask torch.Size([1, 10240])
|
| 32820 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 10240, 81920])
|
| 32821 |
+
batch tensor after cp: position_ids torch.Size([1, 10240])
|
| 32822 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 32823 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 32824 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 32825 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 32826 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 32827 |
+
batch tensor after cp: tokens torch.Size([1, 10240])
|
| 32828 |
+
batch tensor after cp: labels torch.Size([1, 10240])
|
| 32829 |
+
batch tensor after cp: loss_mask torch.Size([1, 10240])
|
| 32830 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 10240, 81920])
|
| 32831 |
+
batch tensor after cp: position_ids torch.Size([1, 10240])
|
| 32832 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 32833 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 32834 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 32835 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 32836 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 32837 |
+
batch tensor after cp: tokens torch.Size([1, 10240])
|
| 32838 |
+
batch tensor after cp: labels torch.Size([1, 10240])
|
| 32839 |
+
batch tensor after cp: loss_mask torch.Size([1, 10240])
|
| 32840 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 10240, 81920])
|
| 32841 |
+
batch tensor after cp: position_ids torch.Size([1, 10240])
|
| 32842 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 32843 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 32844 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 32845 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 32846 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 32847 |
+
batch tensor after cp: tokens torch.Size([1, 10240])
|
| 32848 |
+
batch tensor after cp: labels torch.Size([1, 10240])
|
| 32849 |
+
batch tensor after cp: loss_mask torch.Size([1, 10240])
|
| 32850 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 10240, 81920])
|
| 32851 |
+
batch tensor after cp: position_ids torch.Size([1, 10240])
|
| 32852 |
+
Start exporting trace 2
|
| 32853 |
+
Done exporting trace 2
|
| 32854 |
+
[2025-06-21 22:08:14] iteration 3/ 10 | consumed samples: 3 | elapsed time per iteration (ms): 41354.7 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 1073741824.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
|
| 32855 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 32856 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 32857 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 32858 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 32859 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 32860 |
+
batch tensor after cp: tokens torch.Size([1, 10240])
|
| 32861 |
+
batch tensor after cp: labels torch.Size([1, 10240])
|
| 32862 |
+
batch tensor after cp: loss_mask torch.Size([1, 10240])
|
| 32863 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 10240, 81920])
|
| 32864 |
+
batch tensor after cp: position_ids torch.Size([1, 10240])
|
| 32865 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 32866 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 32867 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 32868 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 32869 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 32870 |
+
batch tensor after cp: tokens torch.Size([1, 10240])
|
| 32871 |
+
batch tensor after cp: labels torch.Size([1, 10240])
|
| 32872 |
+
batch tensor after cp: loss_mask torch.Size([1, 10240])
|
| 32873 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 10240, 81920])
|
| 32874 |
+
batch tensor after cp: position_ids torch.Size([1, 10240])
|
| 32875 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 32876 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 32877 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 32878 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 32879 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 32880 |
+
batch tensor after cp: tokens torch.Size([1, 10240])
|
| 32881 |
+
batch tensor after cp: labels torch.Size([1, 10240])
|
| 32882 |
+
batch tensor after cp: loss_mask torch.Size([1, 10240])
|
| 32883 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 10240, 81920])
|
| 32884 |
+
batch tensor after cp: position_ids torch.Size([1, 10240])
|
| 32885 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 32886 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 32887 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 32888 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 32889 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 32890 |
+
batch tensor after cp: tokens torch.Size([1, 10240])
|
| 32891 |
+
batch tensor after cp: labels torch.Size([1, 10240])
|
| 32892 |
+
batch tensor after cp: loss_mask torch.Size([1, 10240])
|
| 32893 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 10240, 81920])
|
| 32894 |
+
batch tensor after cp: position_ids torch.Size([1, 10240])
|
| 32895 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 32896 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 32897 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 32898 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 32899 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 32900 |
+
batch tensor after cp: tokens torch.Size([1, 10240])
|
| 32901 |
+
batch tensor after cp: labels torch.Size([1, 10240])
|
| 32902 |
+
batch tensor after cp: loss_mask torch.Size([1, 10240])
|
| 32903 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 10240, 81920])
|
| 32904 |
+
batch tensor after cp: position_ids torch.Size([1, 10240])
|
| 32905 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 32906 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 32907 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 32908 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 32909 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 32910 |
+
batch tensor after cp: tokens torch.Size([1, 10240])
|
| 32911 |
+
batch tensor after cp: labels torch.Size([1, 10240])
|
| 32912 |
+
batch tensor after cp: loss_mask torch.Size([1, 10240])
|
| 32913 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 10240, 81920])
|
| 32914 |
+
batch tensor after cp: position_ids torch.Size([1, 10240])
|
| 32915 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 32916 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 32917 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 32918 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 32919 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 32920 |
+
batch tensor after cp: tokens torch.Size([1, 10240])
|
| 32921 |
+
batch tensor after cp: labels torch.Size([1, 10240])
|
| 32922 |
+
batch tensor after cp: loss_mask torch.Size([1, 10240])
|
| 32923 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 10240, 81920])
|
| 32924 |
+
batch tensor after cp: position_ids torch.Size([1, 10240])
|
| 32925 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 32926 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 32927 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 32928 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 32929 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 32930 |
+
batch tensor after cp: tokens torch.Size([1, 10240])
|
| 32931 |
+
batch tensor after cp: labels torch.Size([1, 10240])
|
| 32932 |
+
batch tensor after cp: loss_mask torch.Size([1, 10240])
|
| 32933 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 10240, 81920])
|
| 32934 |
+
batch tensor after cp: position_ids torch.Size([1, 10240])
|
| 32935 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 32936 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 32937 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 32938 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 32939 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 32940 |
+
batch tensor after cp: tokens torch.Size([1, 10240])
|
| 32941 |
+
batch tensor after cp: labels torch.Size([1, 10240])
|
| 32942 |
+
batch tensor after cp: loss_mask torch.Size([1, 10240])
|
| 32943 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 10240, 81920])
|
| 32944 |
+
batch tensor after cp: position_ids torch.Size([1, 10240])
|
| 32945 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 32946 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 32947 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 32948 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 32949 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 32950 |
+
batch tensor after cp: tokens torch.Size([1, 10240])
|
| 32951 |
+
batch tensor after cp: labels torch.Size([1, 10240])
|
| 32952 |
+
batch tensor after cp: loss_mask torch.Size([1, 10240])
|
| 32953 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 10240, 81920])
|
| 32954 |
+
batch tensor after cp: position_ids torch.Size([1, 10240])
|
| 32955 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 32956 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 32957 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 32958 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 32959 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 32960 |
+
batch tensor after cp: tokens torch.Size([1, 10240])
|
| 32961 |
+
batch tensor after cp: labels torch.Size([1, 10240])
|
| 32962 |
+
batch tensor after cp: loss_mask torch.Size([1, 10240])
|
| 32963 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 10240, 81920])
|
| 32964 |
+
batch tensor after cp: position_ids torch.Size([1, 10240])
|
| 32965 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 32966 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 32967 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 32968 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 32969 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 32970 |
+
batch tensor after cp: tokens torch.Size([1, 10240])
|
| 32971 |
+
batch tensor after cp: labels torch.Size([1, 10240])
|
| 32972 |
+
batch tensor after cp: loss_mask torch.Size([1, 10240])
|
| 32973 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 10240, 81920])
|
| 32974 |
+
batch tensor after cp: position_ids torch.Size([1, 10240])
|
| 32975 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 32976 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 32977 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 32978 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 32979 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 32980 |
+
batch tensor after cp: tokens torch.Size([1, 10240])
|
| 32981 |
+
batch tensor after cp: labels torch.Size([1, 10240])
|
| 32982 |
+
batch tensor after cp: loss_mask torch.Size([1, 10240])
|
| 32983 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 10240, 81920])
|
| 32984 |
+
batch tensor after cp: position_ids torch.Size([1, 10240])
|
| 32985 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 32986 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 32987 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 32988 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 32989 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 32990 |
+
batch tensor after cp: tokens torch.Size([1, 10240])
|
| 32991 |
+
batch tensor after cp: labels torch.Size([1, 10240])
|
| 32992 |
+
batch tensor after cp: loss_mask torch.Size([1, 10240])
|
| 32993 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 10240, 81920])
|
| 32994 |
+
batch tensor after cp: position_ids torch.Size([1, 10240])
|
| 32995 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 32996 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 32997 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 32998 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 32999 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 33000 |
+
batch tensor after cp: tokens torch.Size([1, 10240])
|
| 33001 |
+
batch tensor after cp: labels torch.Size([1, 10240])
|
| 33002 |
+
batch tensor after cp: loss_mask torch.Size([1, 10240])
|
| 33003 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 10240, 81920])
|
| 33004 |
+
batch tensor after cp: position_ids torch.Size([1, 10240])
|
attnserver.run_attnserver.slurm.sh.343238.out.log
CHANGED
|
@@ -26756,3 +26756,479 @@ batch tensor after cp: position_ids torch.Size([2, 16384])
|
|
| 26756 |
Start exporting trace 1
|
| 26757 |
Done exporting trace 1
|
| 26758 |
[2025-06-21 22:07:18] iteration 2/ 10 | consumed samples: 2 | elapsed time per iteration (ms): 35680.7 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 2147483648.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
|
| 26759 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 26760 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 26761 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 26762 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 26763 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 26764 |
+
batch tensor after cp: tokens torch.Size([2, 16384])
|
| 26765 |
+
batch tensor after cp: labels torch.Size([2, 16384])
|
| 26766 |
+
batch tensor after cp: loss_mask torch.Size([2, 16384])
|
| 26767 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 16384, 131072])
|
| 26768 |
+
batch tensor after cp: position_ids torch.Size([2, 16384])
|
| 26769 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 26770 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 26771 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 26772 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 26773 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 26774 |
+
batch tensor after cp: tokens torch.Size([2, 16384])
|
| 26775 |
+
batch tensor after cp: labels torch.Size([2, 16384])
|
| 26776 |
+
batch tensor after cp: loss_mask torch.Size([2, 16384])
|
| 26777 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 16384, 131072])
|
| 26778 |
+
batch tensor after cp: position_ids torch.Size([2, 16384])
|
| 26779 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 26780 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 26781 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 26782 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 26783 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 26784 |
+
batch tensor after cp: tokens torch.Size([2, 16384])
|
| 26785 |
+
batch tensor after cp: labels torch.Size([2, 16384])
|
| 26786 |
+
batch tensor after cp: loss_mask torch.Size([2, 16384])
|
| 26787 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 16384, 131072])
|
| 26788 |
+
batch tensor after cp: position_ids torch.Size([2, 16384])
|
| 26789 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 26790 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 26791 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 26792 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 26793 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 26794 |
+
batch tensor after cp: tokens torch.Size([2, 16384])
|
| 26795 |
+
batch tensor after cp: labels torch.Size([2, 16384])
|
| 26796 |
+
batch tensor after cp: loss_mask torch.Size([2, 16384])
|
| 26797 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 16384, 131072])
|
| 26798 |
+
batch tensor after cp: position_ids torch.Size([2, 16384])
|
| 26799 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 26800 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 26801 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 26802 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 26803 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 26804 |
+
batch tensor after cp: tokens torch.Size([2, 16384])
|
| 26805 |
+
batch tensor after cp: labels torch.Size([2, 16384])
|
| 26806 |
+
batch tensor after cp: loss_mask torch.Size([2, 16384])
|
| 26807 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 16384, 131072])
|
| 26808 |
+
batch tensor after cp: position_ids torch.Size([2, 16384])
|
| 26809 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 26810 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 26811 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 26812 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 26813 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 26814 |
+
batch tensor after cp: tokens torch.Size([2, 16384])
|
| 26815 |
+
batch tensor after cp: labels torch.Size([2, 16384])
|
| 26816 |
+
batch tensor after cp: loss_mask torch.Size([2, 16384])
|
| 26817 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 16384, 131072])
|
| 26818 |
+
batch tensor after cp: position_ids torch.Size([2, 16384])
|
| 26819 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 26820 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 26821 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 26822 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 26823 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 26824 |
+
batch tensor after cp: tokens torch.Size([2, 16384])
|
| 26825 |
+
batch tensor after cp: labels torch.Size([2, 16384])
|
| 26826 |
+
batch tensor after cp: loss_mask torch.Size([2, 16384])
|
| 26827 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 16384, 131072])
|
| 26828 |
+
batch tensor after cp: position_ids torch.Size([2, 16384])
|
| 26829 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 26830 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 26831 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 26832 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 26833 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 26834 |
+
batch tensor after cp: tokens torch.Size([2, 16384])
|
| 26835 |
+
batch tensor after cp: labels torch.Size([2, 16384])
|
| 26836 |
+
batch tensor after cp: loss_mask torch.Size([2, 16384])
|
| 26837 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 16384, 131072])
|
| 26838 |
+
batch tensor after cp: position_ids torch.Size([2, 16384])
|
| 26839 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 26840 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 26841 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 26842 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 26843 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 26844 |
+
batch tensor after cp: tokens torch.Size([2, 16384])
|
| 26845 |
+
batch tensor after cp: labels torch.Size([2, 16384])
|
| 26846 |
+
batch tensor after cp: loss_mask torch.Size([2, 16384])
|
| 26847 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 16384, 131072])
|
| 26848 |
+
batch tensor after cp: position_ids torch.Size([2, 16384])
|
| 26849 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 26850 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 26851 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 26852 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 26853 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 26854 |
+
batch tensor after cp: tokens torch.Size([2, 16384])
|
| 26855 |
+
batch tensor after cp: labels torch.Size([2, 16384])
|
| 26856 |
+
batch tensor after cp: loss_mask torch.Size([2, 16384])
|
| 26857 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 16384, 131072])
|
| 26858 |
+
batch tensor after cp: position_ids torch.Size([2, 16384])
|
| 26859 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 26860 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 26861 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 26862 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 26863 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 26864 |
+
batch tensor after cp: tokens torch.Size([2, 16384])
|
| 26865 |
+
batch tensor after cp: labels torch.Size([2, 16384])
|
| 26866 |
+
batch tensor after cp: loss_mask torch.Size([2, 16384])
|
| 26867 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 16384, 131072])
|
| 26868 |
+
batch tensor after cp: position_ids torch.Size([2, 16384])
|
| 26869 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 26870 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 26871 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 26872 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 26873 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 26874 |
+
batch tensor after cp: tokens torch.Size([2, 16384])
|
| 26875 |
+
batch tensor after cp: labels torch.Size([2, 16384])
|
| 26876 |
+
batch tensor after cp: loss_mask torch.Size([2, 16384])
|
| 26877 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 16384, 131072])
|
| 26878 |
+
batch tensor after cp: position_ids torch.Size([2, 16384])
|
| 26879 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 26880 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 26881 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 26882 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 26883 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 26884 |
+
batch tensor after cp: tokens torch.Size([2, 16384])
|
| 26885 |
+
batch tensor after cp: labels torch.Size([2, 16384])
|
| 26886 |
+
batch tensor after cp: loss_mask torch.Size([2, 16384])
|
| 26887 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 16384, 131072])
|
| 26888 |
+
batch tensor after cp: position_ids torch.Size([2, 16384])
|
| 26889 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 26890 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 26891 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 26892 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 26893 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 26894 |
+
batch tensor after cp: tokens torch.Size([2, 16384])
|
| 26895 |
+
batch tensor after cp: labels torch.Size([2, 16384])
|
| 26896 |
+
batch tensor after cp: loss_mask torch.Size([2, 16384])
|
| 26897 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 16384, 131072])
|
| 26898 |
+
batch tensor after cp: position_ids torch.Size([2, 16384])
|
| 26899 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 26900 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 26901 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 26902 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 26903 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 26904 |
+
batch tensor after cp: tokens torch.Size([2, 16384])
|
| 26905 |
+
batch tensor after cp: labels torch.Size([2, 16384])
|
| 26906 |
+
batch tensor after cp: loss_mask torch.Size([2, 16384])
|
| 26907 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 16384, 131072])
|
| 26908 |
+
batch tensor after cp: position_ids torch.Size([2, 16384])
|
| 26909 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 26910 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 26911 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 26912 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 26913 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 26914 |
+
batch tensor after cp: tokens torch.Size([2, 16384])
|
| 26915 |
+
batch tensor after cp: labels torch.Size([2, 16384])
|
| 26916 |
+
batch tensor after cp: loss_mask torch.Size([2, 16384])
|
| 26917 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 16384, 131072])
|
| 26918 |
+
batch tensor after cp: position_ids torch.Size([2, 16384])
|
| 26919 |
+
Start exporting trace 2
|
| 26920 |
+
Done exporting trace 2
|
| 26921 |
+
[2025-06-21 22:07:49] iteration 3/ 10 | consumed samples: 3 | elapsed time per iteration (ms): 31377.5 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 1073741824.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
|
| 26922 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 26923 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 26924 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 26925 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 26926 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 26927 |
+
batch tensor after cp: tokens torch.Size([2, 16384])
|
| 26928 |
+
batch tensor after cp: labels torch.Size([2, 16384])
|
| 26929 |
+
batch tensor after cp: loss_mask torch.Size([2, 16384])
|
| 26930 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 16384, 131072])
|
| 26931 |
+
batch tensor after cp: position_ids torch.Size([2, 16384])
|
| 26932 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 26933 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 26934 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 26935 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 26936 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 26937 |
+
batch tensor after cp: tokens torch.Size([2, 16384])
|
| 26938 |
+
batch tensor after cp: labels torch.Size([2, 16384])
|
| 26939 |
+
batch tensor after cp: loss_mask torch.Size([2, 16384])
|
| 26940 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 16384, 131072])
|
| 26941 |
+
batch tensor after cp: position_ids torch.Size([2, 16384])
|
| 26942 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 26943 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 26944 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 26945 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 26946 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 26947 |
+
batch tensor after cp: tokens torch.Size([2, 16384])
|
| 26948 |
+
batch tensor after cp: labels torch.Size([2, 16384])
|
| 26949 |
+
batch tensor after cp: loss_mask torch.Size([2, 16384])
|
| 26950 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 16384, 131072])
|
| 26951 |
+
batch tensor after cp: position_ids torch.Size([2, 16384])
|
| 26952 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 26953 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 26954 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 26955 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 26956 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 26957 |
+
batch tensor after cp: tokens torch.Size([2, 16384])
|
| 26958 |
+
batch tensor after cp: labels torch.Size([2, 16384])
|
| 26959 |
+
batch tensor after cp: loss_mask torch.Size([2, 16384])
|
| 26960 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 16384, 131072])
|
| 26961 |
+
batch tensor after cp: position_ids torch.Size([2, 16384])
|
| 26962 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 26963 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 26964 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 26965 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 26966 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 26967 |
+
batch tensor after cp: tokens torch.Size([2, 16384])
|
| 26968 |
+
batch tensor after cp: labels torch.Size([2, 16384])
|
| 26969 |
+
batch tensor after cp: loss_mask torch.Size([2, 16384])
|
| 26970 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 16384, 131072])
|
| 26971 |
+
batch tensor after cp: position_ids torch.Size([2, 16384])
|
| 26972 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 26973 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 26974 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 26975 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 26976 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 26977 |
+
batch tensor after cp: tokens torch.Size([2, 16384])
|
| 26978 |
+
batch tensor after cp: labels torch.Size([2, 16384])
|
| 26979 |
+
batch tensor after cp: loss_mask torch.Size([2, 16384])
|
| 26980 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 16384, 131072])
|
| 26981 |
+
batch tensor after cp: position_ids torch.Size([2, 16384])
|
| 26982 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 26983 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 26984 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 26985 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 26986 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 26987 |
+
batch tensor after cp: tokens torch.Size([2, 16384])
|
| 26988 |
+
batch tensor after cp: labels torch.Size([2, 16384])
|
| 26989 |
+
batch tensor after cp: loss_mask torch.Size([2, 16384])
|
| 26990 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 16384, 131072])
|
| 26991 |
+
batch tensor after cp: position_ids torch.Size([2, 16384])
|
| 26992 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 26993 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 26994 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 26995 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 26996 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 26997 |
+
batch tensor after cp: tokens torch.Size([2, 16384])
|
| 26998 |
+
batch tensor after cp: labels torch.Size([2, 16384])
|
| 26999 |
+
batch tensor after cp: loss_mask torch.Size([2, 16384])
|
| 27000 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 16384, 131072])
|
| 27001 |
+
batch tensor after cp: position_ids torch.Size([2, 16384])
|
| 27002 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 27003 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 27004 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 27005 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 27006 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 27007 |
+
batch tensor after cp: tokens torch.Size([2, 16384])
|
| 27008 |
+
batch tensor after cp: labels torch.Size([2, 16384])
|
| 27009 |
+
batch tensor after cp: loss_mask torch.Size([2, 16384])
|
| 27010 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 16384, 131072])
|
| 27011 |
+
batch tensor after cp: position_ids torch.Size([2, 16384])
|
| 27012 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 27013 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 27014 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 27015 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 27016 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 27017 |
+
batch tensor after cp: tokens torch.Size([2, 16384])
|
| 27018 |
+
batch tensor after cp: labels torch.Size([2, 16384])
|
| 27019 |
+
batch tensor after cp: loss_mask torch.Size([2, 16384])
|
| 27020 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 16384, 131072])
|
| 27021 |
+
batch tensor after cp: position_ids torch.Size([2, 16384])
|
| 27022 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 27023 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 27024 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 27025 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 27026 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 27027 |
+
batch tensor after cp: tokens torch.Size([2, 16384])
|
| 27028 |
+
batch tensor after cp: labels torch.Size([2, 16384])
|
| 27029 |
+
batch tensor after cp: loss_mask torch.Size([2, 16384])
|
| 27030 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 16384, 131072])
|
| 27031 |
+
batch tensor after cp: position_ids torch.Size([2, 16384])
|
| 27032 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 27033 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 27034 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 27035 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 27036 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 27037 |
+
batch tensor after cp: tokens torch.Size([2, 16384])
|
| 27038 |
+
batch tensor after cp: labels torch.Size([2, 16384])
|
| 27039 |
+
batch tensor after cp: loss_mask torch.Size([2, 16384])
|
| 27040 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 16384, 131072])
|
| 27041 |
+
batch tensor after cp: position_ids torch.Size([2, 16384])
|
| 27042 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 27043 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 27044 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 27045 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 27046 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 27047 |
+
batch tensor after cp: tokens torch.Size([2, 16384])
|
| 27048 |
+
batch tensor after cp: labels torch.Size([2, 16384])
|
| 27049 |
+
batch tensor after cp: loss_mask torch.Size([2, 16384])
|
| 27050 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 16384, 131072])
|
| 27051 |
+
batch tensor after cp: position_ids torch.Size([2, 16384])
|
| 27052 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 27053 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 27054 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 27055 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 27056 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 27057 |
+
batch tensor after cp: tokens torch.Size([2, 16384])
|
| 27058 |
+
batch tensor after cp: labels torch.Size([2, 16384])
|
| 27059 |
+
batch tensor after cp: loss_mask torch.Size([2, 16384])
|
| 27060 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 16384, 131072])
|
| 27061 |
+
batch tensor after cp: position_ids torch.Size([2, 16384])
|
| 27062 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 27063 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 27064 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 27065 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 27066 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 27067 |
+
batch tensor after cp: tokens torch.Size([2, 16384])
|
| 27068 |
+
batch tensor after cp: labels torch.Size([2, 16384])
|
| 27069 |
+
batch tensor after cp: loss_mask torch.Size([2, 16384])
|
| 27070 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 16384, 131072])
|
| 27071 |
+
batch tensor after cp: position_ids torch.Size([2, 16384])
|
| 27072 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 27073 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 27074 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 27075 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 27076 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 27077 |
+
batch tensor after cp: tokens torch.Size([2, 16384])
|
| 27078 |
+
batch tensor after cp: labels torch.Size([2, 16384])
|
| 27079 |
+
batch tensor after cp: loss_mask torch.Size([2, 16384])
|
| 27080 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 16384, 131072])
|
| 27081 |
+
batch tensor after cp: position_ids torch.Size([2, 16384])
|
| 27082 |
+
Start exporting trace 3
|
| 27083 |
+
Done exporting trace 3
|
| 27084 |
+
[2025-06-21 22:08:20] iteration 4/ 10 | consumed samples: 4 | elapsed time per iteration (ms): 31157.1 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 536870912.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
|
| 27085 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 27086 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 27087 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 27088 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 27089 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 27090 |
+
batch tensor after cp: tokens torch.Size([2, 16384])
|
| 27091 |
+
batch tensor after cp: labels torch.Size([2, 16384])
|
| 27092 |
+
batch tensor after cp: loss_mask torch.Size([2, 16384])
|
| 27093 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 16384, 131072])
|
| 27094 |
+
batch tensor after cp: position_ids torch.Size([2, 16384])
|
| 27095 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 27096 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 27097 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 27098 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 27099 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 27100 |
+
batch tensor after cp: tokens torch.Size([2, 16384])
|
| 27101 |
+
batch tensor after cp: labels torch.Size([2, 16384])
|
| 27102 |
+
batch tensor after cp: loss_mask torch.Size([2, 16384])
|
| 27103 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 16384, 131072])
|
| 27104 |
+
batch tensor after cp: position_ids torch.Size([2, 16384])
|
| 27105 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 27106 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 27107 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 27108 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 27109 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 27110 |
+
batch tensor after cp: tokens torch.Size([2, 16384])
|
| 27111 |
+
batch tensor after cp: labels torch.Size([2, 16384])
|
| 27112 |
+
batch tensor after cp: loss_mask torch.Size([2, 16384])
|
| 27113 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 16384, 131072])
|
| 27114 |
+
batch tensor after cp: position_ids torch.Size([2, 16384])
|
| 27115 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 27116 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 27117 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 27118 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 27119 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 27120 |
+
batch tensor after cp: tokens torch.Size([2, 16384])
|
| 27121 |
+
batch tensor after cp: labels torch.Size([2, 16384])
|
| 27122 |
+
batch tensor after cp: loss_mask torch.Size([2, 16384])
|
| 27123 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 16384, 131072])
|
| 27124 |
+
batch tensor after cp: position_ids torch.Size([2, 16384])
|
| 27125 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 27126 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 27127 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 27128 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 27129 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 27130 |
+
batch tensor after cp: tokens torch.Size([2, 16384])
|
| 27131 |
+
batch tensor after cp: labels torch.Size([2, 16384])
|
| 27132 |
+
batch tensor after cp: loss_mask torch.Size([2, 16384])
|
| 27133 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 16384, 131072])
|
| 27134 |
+
batch tensor after cp: position_ids torch.Size([2, 16384])
|
| 27135 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 27136 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 27137 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 27138 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 27139 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 27140 |
+
batch tensor after cp: tokens torch.Size([2, 16384])
|
| 27141 |
+
batch tensor after cp: labels torch.Size([2, 16384])
|
| 27142 |
+
batch tensor after cp: loss_mask torch.Size([2, 16384])
|
| 27143 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 16384, 131072])
|
| 27144 |
+
batch tensor after cp: position_ids torch.Size([2, 16384])
|
| 27145 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 27146 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 27147 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 27148 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 27149 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 27150 |
+
batch tensor after cp: tokens torch.Size([2, 16384])
|
| 27151 |
+
batch tensor after cp: labels torch.Size([2, 16384])
|
| 27152 |
+
batch tensor after cp: loss_mask torch.Size([2, 16384])
|
| 27153 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 16384, 131072])
|
| 27154 |
+
batch tensor after cp: position_ids torch.Size([2, 16384])
|
| 27155 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 27156 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 27157 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 27158 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 27159 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 27160 |
+
batch tensor after cp: tokens torch.Size([2, 16384])
|
| 27161 |
+
batch tensor after cp: labels torch.Size([2, 16384])
|
| 27162 |
+
batch tensor after cp: loss_mask torch.Size([2, 16384])
|
| 27163 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 16384, 131072])
|
| 27164 |
+
batch tensor after cp: position_ids torch.Size([2, 16384])
|
| 27165 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 27166 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 27167 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 27168 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 27169 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 27170 |
+
batch tensor after cp: tokens torch.Size([2, 16384])
|
| 27171 |
+
batch tensor after cp: labels torch.Size([2, 16384])
|
| 27172 |
+
batch tensor after cp: loss_mask torch.Size([2, 16384])
|
| 27173 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 16384, 131072])
|
| 27174 |
+
batch tensor after cp: position_ids torch.Size([2, 16384])
|
| 27175 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 27176 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 27177 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 27178 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 27179 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 27180 |
+
batch tensor after cp: tokens torch.Size([2, 16384])
|
| 27181 |
+
batch tensor after cp: labels torch.Size([2, 16384])
|
| 27182 |
+
batch tensor after cp: loss_mask torch.Size([2, 16384])
|
| 27183 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 16384, 131072])
|
| 27184 |
+
batch tensor after cp: position_ids torch.Size([2, 16384])
|
| 27185 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 27186 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 27187 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 27188 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 27189 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 27190 |
+
batch tensor after cp: tokens torch.Size([2, 16384])
|
| 27191 |
+
batch tensor after cp: labels torch.Size([2, 16384])
|
| 27192 |
+
batch tensor after cp: loss_mask torch.Size([2, 16384])
|
| 27193 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 16384, 131072])
|
| 27194 |
+
batch tensor after cp: position_ids torch.Size([2, 16384])
|
| 27195 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 27196 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 27197 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 27198 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 27199 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 27200 |
+
batch tensor after cp: tokens torch.Size([2, 16384])
|
| 27201 |
+
batch tensor after cp: labels torch.Size([2, 16384])
|
| 27202 |
+
batch tensor after cp: loss_mask torch.Size([2, 16384])
|
| 27203 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 16384, 131072])
|
| 27204 |
+
batch tensor after cp: position_ids torch.Size([2, 16384])
|
| 27205 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 27206 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 27207 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 27208 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 27209 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 27210 |
+
batch tensor after cp: tokens torch.Size([2, 16384])
|
| 27211 |
+
batch tensor after cp: labels torch.Size([2, 16384])
|
| 27212 |
+
batch tensor after cp: loss_mask torch.Size([2, 16384])
|
| 27213 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 16384, 131072])
|
| 27214 |
+
batch tensor after cp: position_ids torch.Size([2, 16384])
|
| 27215 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 27216 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 27217 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 27218 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 27219 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 27220 |
+
batch tensor after cp: tokens torch.Size([2, 16384])
|
| 27221 |
+
batch tensor after cp: labels torch.Size([2, 16384])
|
| 27222 |
+
batch tensor after cp: loss_mask torch.Size([2, 16384])
|
| 27223 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 16384, 131072])
|
| 27224 |
+
batch tensor after cp: position_ids torch.Size([2, 16384])
|
| 27225 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 27226 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 27227 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 27228 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 27229 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 27230 |
+
batch tensor after cp: tokens torch.Size([2, 16384])
|
| 27231 |
+
batch tensor after cp: labels torch.Size([2, 16384])
|
| 27232 |
+
batch tensor after cp: loss_mask torch.Size([2, 16384])
|
| 27233 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 16384, 131072])
|
| 27234 |
+
batch tensor after cp: position_ids torch.Size([2, 16384])
|
attnserver.run_attnserver.slurm.sh.343239.err.log
CHANGED
@@ -1186,3 +1186,489 @@ W0621 22:07:10.319000 2142274 site-packages/torch/distributed/run.py:766]
| 1186 |
W0621 22:07:10.319000 2142274 site-packages/torch/distributed/run.py:766] *****************************************
|
| 1187 |
W0621 22:07:10.319000 2142274 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
|
| 1188 |
W0621 22:07:10.319000 2142274 site-packages/torch/distributed/run.py:766] *****************************************
|
| 1189 |
+
[rank1]:[W621 22:07:32.480114235 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 1] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 1190 |
+
[rank3]:[W621 22:07:32.480114328 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 3] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 1191 |
+
[rank2]:[W621 22:07:32.480220492 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 2] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 1192 |
+
[rank7]:[W621 22:07:32.480275848 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 7] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 1193 |
+
[rank6]:[W621 22:07:32.481269400 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 6] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 1194 |
+
[rank5]:[W621 22:07:32.481507902 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 5] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 1195 |
+
[rank15]:[W621 22:07:32.903977596 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 15] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 1196 |
+
[rank10]:[W621 22:07:32.909462870 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 10] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 1197 |
+
[rank12]:[W621 22:07:32.909537097 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 12] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 1198 |
+
[rank11]:[W621 22:07:32.909539083 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 11] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 1199 |
+
[rank13]:[W621 22:07:32.909725577 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 13] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 1200 |
+
[rank9]:[W621 22:07:32.909834877 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 9] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 1201 |
+
[rank4]:[W621 22:07:32.488882234 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 4] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 1202 |
+
[rank14]:[W621 22:07:32.910300560 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 14] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 1203 |
+
[rank8]:[W621 22:07:32.996916035 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 8] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 1204 |
+
[rank0]:[W621 22:07:32.628605508 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 0] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 1205 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 1206 |
+
warnings.warn(
|
| 1207 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 1208 |
+
warnings.warn(
|
| 1209 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 1210 |
+
warnings.warn(
|
| 1211 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 1212 |
+
warnings.warn(
|
| 1213 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 1214 |
+
warnings.warn(
|
| 1215 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 1216 |
+
warnings.warn(
|
| 1217 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 1218 |
+
warnings.warn(
|
| 1219 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 1220 |
+
warnings.warn(
|
| 1221 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 1222 |
+
warnings.warn(
|
| 1223 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 1224 |
+
warnings.warn(
|
| 1225 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 1226 |
+
warnings.warn(
|
| 1227 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 1228 |
+
warnings.warn(
|
| 1229 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 1230 |
+
warnings.warn(
|
| 1231 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 1232 |
+
warnings.warn(
|
| 1233 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 1234 |
+
warnings.warn(
|
| 1235 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 1236 |
+
warnings.warn(
|
| 1237 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 1238 |
+
warnings.warn(
|
| 1239 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 1240 |
+
warnings.warn(
|
| 1241 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 1242 |
+
warnings.warn(
|
| 1243 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 1244 |
+
warnings.warn(
|
| 1245 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 1246 |
+
warnings.warn(
|
| 1247 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 1248 |
+
warnings.warn(
|
| 1249 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 1250 |
+
warnings.warn(
|
| 1251 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 1252 |
+
warnings.warn(
|
| 1253 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 1254 |
+
warnings.warn(
|
| 1255 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 1256 |
+
warnings.warn(
|
| 1257 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 1258 |
+
warnings.warn(
|
| 1259 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 1260 |
+
warnings.warn(
|
| 1261 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 1262 |
+
warnings.warn(
|
| 1263 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 1264 |
+
warnings.warn(
|
| 1265 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 1266 |
+
warnings.warn(
|
| 1267 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 1268 |
+
warnings.warn(
|
| 1269 |
+
[rank8]: PermissionError: [Errno 13] Permission denied: 'gpt-checkpoint/iter_0000010/__8_0.distcp'
|
| 1270 |
+
|
| 1271 |
+
[rank8]: The above exception was the direct cause of the following exception:
|
| 1272 |
+
|
| 1273 |
+
[rank8]: Traceback (most recent call last):
|
| 1274 |
+
[rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
|
| 1275 |
+
[rank8]: pretrain(
|
| 1276 |
+
[rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 879, in pretrain
|
| 1277 |
+
[rank8]: save_checkpoint(
|
| 1278 |
+
[rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 469, in save_checkpoint
|
| 1279 |
+
[rank8]: async_save_request = dist_checkpointing.save(state_dict, checkpoint_name, save_strategy,
|
| 1280 |
+
[rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 1281 |
+
[rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 404, in save
|
| 1282 |
+
[rank8]: sharded_strategy.save(sharded_state_dict, checkpoint_dir)
|
| 1283 |
+
[rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/fully_parallel.py", line 95, in save
|
| 1284 |
+
[rank8]: return self.base_strategy.save(sharded_state_dict, checkpoint_dir)
|
| 1285 |
+
[rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 1286 |
+
[rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/base.py", line 228, in save
|
| 1287 |
+
[rank8]: async_calls.maybe_finalize_async_calls(blocking=True)
|
| 1288 |
+
[rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/async_utils.py", line 545, in maybe_finalize_async_calls
|
| 1289 |
+
[rank8]: finalize_fn()
|
| 1290 |
+
[rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 800, in finalize_fn
|
| 1291 |
+
[rank8]: save_state_dict_async_finalize(*save_state_dict_ret)
|
| 1292 |
+
[rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/state_dict_saver.py", line 229, in save_state_dict_async_finalize
|
| 1293 |
+
[rank8]: write_results = storage_writer.retrieve_write_results()
|
| 1294 |
+
[rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 1295 |
+
[rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/filesystem_async.py", line 436, in retrieve_write_results
|
| 1296 |
+
[rank8]: raise RuntimeError(f'Worker failure: {write_results_or_exc}') from write_results_or_exc
|
| 1297 |
+
[rank8]: RuntimeError: Worker failure: [Errno 13] Permission denied: 'gpt-checkpoint/iter_0000010/__8_0.distcp'
|
| 1298 |
+
[rank11]: PermissionError: [Errno 13] Permission denied: 'gpt-checkpoint/iter_0000010/__11_0.distcp'
|
| 1299 |
+
|
| 1300 |
+
[rank11]: The above exception was the direct cause of the following exception:
|
| 1301 |
+
|
| 1302 |
+
[rank11]: Traceback (most recent call last):
|
| 1303 |
+
[rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
|
| 1304 |
+
[rank11]: pretrain(
|
| 1305 |
+
[rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 879, in pretrain
|
| 1306 |
+
[rank11]: save_checkpoint(
|
| 1307 |
+
[rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 469, in save_checkpoint
|
| 1308 |
+
[rank11]: async_save_request = dist_checkpointing.save(state_dict, checkpoint_name, save_strategy,
|
| 1309 |
+
[rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 1310 |
+
[rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 404, in save
|
| 1311 |
+
[rank11]: sharded_strategy.save(sharded_state_dict, checkpoint_dir)
|
| 1312 |
+
[rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/fully_parallel.py", line 95, in save
|
| 1313 |
+
[rank11]: return self.base_strategy.save(sharded_state_dict, checkpoint_dir)
|
| 1314 |
+
[rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 1315 |
+
[rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/base.py", line 228, in save
|
| 1316 |
+
[rank11]: async_calls.maybe_finalize_async_calls(blocking=True)
|
| 1317 |
+
[rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/async_utils.py", line 545, in maybe_finalize_async_calls
|
| 1318 |
+
[rank11]: finalize_fn()
|
| 1319 |
+
[rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 800, in finalize_fn
|
| 1320 |
+
[rank11]: save_state_dict_async_finalize(*save_state_dict_ret)
|
| 1321 |
+
[rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/state_dict_saver.py", line 229, in save_state_dict_async_finalize
|
| 1322 |
+
[rank11]: write_results = storage_writer.retrieve_write_results()
|
| 1323 |
+
[rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 1324 |
+
[rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/filesystem_async.py", line 436, in retrieve_write_results
|
| 1325 |
+
[rank11]: raise RuntimeError(f'Worker failure: {write_results_or_exc}') from write_results_or_exc
|
| 1326 |
+
[rank11]: RuntimeError: Worker failure: [Errno 13] Permission denied: 'gpt-checkpoint/iter_0000010/__11_0.distcp'
|
| 1327 |
+
[rank9]: PermissionError: [Errno 13] Permission denied: 'gpt-checkpoint/iter_0000010/__9_0.distcp'
|
| 1328 |
+
|
| 1329 |
+
[rank9]: The above exception was the direct cause of the following exception:
|
| 1330 |
+
|
| 1331 |
+
[rank9]: Traceback (most recent call last):
|
| 1332 |
+
[rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
|
| 1333 |
+
[rank9]: pretrain(
|
| 1334 |
+
[rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 879, in pretrain
|
| 1335 |
+
[rank9]: save_checkpoint(
|
| 1336 |
+
[rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 469, in save_checkpoint
|
| 1337 |
+
[rank9]: async_save_request = dist_checkpointing.save(state_dict, checkpoint_name, save_strategy,
|
| 1338 |
+
[rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 1339 |
+
[rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 404, in save
|
| 1340 |
+
[rank9]: sharded_strategy.save(sharded_state_dict, checkpoint_dir)
|
| 1341 |
+
[rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/fully_parallel.py", line 95, in save
|
| 1342 |
+
[rank9]: return self.base_strategy.save(sharded_state_dict, checkpoint_dir)
|
| 1343 |
+
[rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 1344 |
+
[rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/base.py", line 228, in save
|
| 1345 |
+
[rank9]: async_calls.maybe_finalize_async_calls(blocking=True)
|
| 1346 |
+
[rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/async_utils.py", line 545, in maybe_finalize_async_calls
|
| 1347 |
+
[rank9]: finalize_fn()
|
| 1348 |
+
[rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 800, in finalize_fn
|
| 1349 |
+
[rank9]: save_state_dict_async_finalize(*save_state_dict_ret)
|
| 1350 |
+
[rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/state_dict_saver.py", line 229, in save_state_dict_async_finalize
|
| 1351 |
+
[rank9]: write_results = storage_writer.retrieve_write_results()
|
| 1352 |
+
[rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 1353 |
+
[rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/filesystem_async.py", line 436, in retrieve_write_results
|
| 1354 |
+
[rank9]: raise RuntimeError(f'Worker failure: {write_results_or_exc}') from write_results_or_exc
|
| 1355 |
+
[rank9]: RuntimeError: Worker failure: [Errno 13] Permission denied: 'gpt-checkpoint/iter_0000010/__9_0.distcp'
|
| 1356 |
+
[rank14]: PermissionError: [Errno 13] Permission denied: 'gpt-checkpoint/iter_0000010/__14_0.distcp'
|
| 1357 |
+
|
| 1358 |
+
[rank14]: The above exception was the direct cause of the following exception:
|
| 1359 |
+
|
| 1360 |
+
[rank14]: Traceback (most recent call last):
|
| 1361 |
+
[rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
|
| 1362 |
+
[rank14]: pretrain(
|
| 1363 |
+
[rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 879, in pretrain
|
| 1364 |
+
[rank14]: save_checkpoint(
|
| 1365 |
+
[rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 469, in save_checkpoint
|
| 1366 |
+
[rank14]: async_save_request = dist_checkpointing.save(state_dict, checkpoint_name, save_strategy,
|
| 1367 |
+
[rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 1368 |
+
[rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 404, in save
|
| 1369 |
+
[rank14]: sharded_strategy.save(sharded_state_dict, checkpoint_dir)
|
| 1370 |
+
[rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/fully_parallel.py", line 95, in save
|
| 1371 |
+
[rank14]: return self.base_strategy.save(sharded_state_dict, checkpoint_dir)
|
| 1372 |
+
[rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 1373 |
+
[rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/base.py", line 228, in save
|
| 1374 |
+
[rank14]: async_calls.maybe_finalize_async_calls(blocking=True)
|
| 1375 |
+
[rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/async_utils.py", line 545, in maybe_finalize_async_calls
|
| 1376 |
+
[rank14]: finalize_fn()
|
| 1377 |
+
[rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 800, in finalize_fn
|
| 1378 |
+
[rank14]: save_state_dict_async_finalize(*save_state_dict_ret)
|
| 1379 |
+
[rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/state_dict_saver.py", line 229, in save_state_dict_async_finalize
|
| 1380 |
+
[rank14]: write_results = storage_writer.retrieve_write_results()
|
| 1381 |
+
[rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 1382 |
+
[rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/filesystem_async.py", line 436, in retrieve_write_results
|
| 1383 |
+
[rank14]: raise RuntimeError(f'Worker failure: {write_results_or_exc}') from write_results_or_exc
|
| 1384 |
+
[rank14]: RuntimeError: Worker failure: [Errno 13] Permission denied: 'gpt-checkpoint/iter_0000010/__14_0.distcp'
|
| 1385 |
+
[rank12]: PermissionError: [Errno 13] Permission denied: 'gpt-checkpoint/iter_0000010/__12_0.distcp'
|
| 1386 |
+
|
| 1387 |
+
[rank12]: The above exception was the direct cause of the following exception:
|
| 1388 |
+
|
| 1389 |
+
[rank12]: Traceback (most recent call last):
|
| 1390 |
+
[rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
|
| 1391 |
+
[rank12]: pretrain(
|
| 1392 |
+
[rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 879, in pretrain
|
| 1393 |
+
[rank12]: save_checkpoint(
|
| 1394 |
+
[rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 469, in save_checkpoint
|
| 1395 |
+
[rank12]: async_save_request = dist_checkpointing.save(state_dict, checkpoint_name, save_strategy,
|
| 1396 |
+
[rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 1397 |
+
[rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 404, in save
|
| 1398 |
+
[rank12]: sharded_strategy.save(sharded_state_dict, checkpoint_dir)
|
| 1399 |
+
[rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/fully_parallel.py", line 95, in save
|
| 1400 |
+
[rank12]: return self.base_strategy.save(sharded_state_dict, checkpoint_dir)
|
| 1401 |
+
[rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 1402 |
+
[rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/base.py", line 228, in save
|
| 1403 |
+
[rank12]: async_calls.maybe_finalize_async_calls(blocking=True)
|
| 1404 |
+
[rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/async_utils.py", line 545, in maybe_finalize_async_calls
|
| 1405 |
+
[rank12]: finalize_fn()
|
| 1406 |
+
[rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 800, in finalize_fn
|
| 1407 |
+
[rank12]: save_state_dict_async_finalize(*save_state_dict_ret)
|
| 1408 |
+
[rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/state_dict_saver.py", line 229, in save_state_dict_async_finalize
|
| 1409 |
+
[rank12]: write_results = storage_writer.retrieve_write_results()
|
| 1410 |
+
[rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 1411 |
+
[rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/filesystem_async.py", line 436, in retrieve_write_results
|
| 1412 |
+
[rank12]: raise RuntimeError(f'Worker failure: {write_results_or_exc}') from write_results_or_exc
|
| 1413 |
+
[rank12]: RuntimeError: Worker failure: [Errno 13] Permission denied: 'gpt-checkpoint/iter_0000010/__12_0.distcp'
|
| 1414 |
+
[rank13]: PermissionError: [Errno 13] Permission denied: 'gpt-checkpoint/iter_0000010/__13_0.distcp'
|
| 1415 |
+
|
| 1416 |
+
[rank13]: The above exception was the direct cause of the following exception:
|
| 1417 |
+
|
| 1418 |
+
[rank13]: Traceback (most recent call last):
|
| 1419 |
+
[rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
|
| 1420 |
+
[rank13]: pretrain(
|
| 1421 |
+
[rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 879, in pretrain
|
| 1422 |
+
[rank13]: save_checkpoint(
|
| 1423 |
+
[rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 469, in save_checkpoint
|
| 1424 |
+
[rank13]: async_save_request = dist_checkpointing.save(state_dict, checkpoint_name, save_strategy,
|
| 1425 |
+
[rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 1426 |
+
[rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 404, in save
|
| 1427 |
+
[rank13]: sharded_strategy.save(sharded_state_dict, checkpoint_dir)
|
| 1428 |
+
[rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/fully_parallel.py", line 95, in save
|
| 1429 |
+
[rank13]: return self.base_strategy.save(sharded_state_dict, checkpoint_dir)
|
| 1430 |
+
[rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 1431 |
+
[rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/base.py", line 228, in save
|
| 1432 |
+
[rank13]: async_calls.maybe_finalize_async_calls(blocking=True)
|
| 1433 |
+
[rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/async_utils.py", line 545, in maybe_finalize_async_calls
|
| 1434 |
+
[rank13]: finalize_fn()
|
| 1435 |
+
[rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 800, in finalize_fn
|
| 1436 |
+
[rank13]: save_state_dict_async_finalize(*save_state_dict_ret)
|
| 1437 |
+
[rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/state_dict_saver.py", line 229, in save_state_dict_async_finalize
|
| 1438 |
+
[rank13]: write_results = storage_writer.retrieve_write_results()
|
| 1439 |
+
[rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 1440 |
+
[rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/filesystem_async.py", line 436, in retrieve_write_results
|
| 1441 |
+
[rank13]: raise RuntimeError(f'Worker failure: {write_results_or_exc}') from write_results_or_exc
|
| 1442 |
+
[rank13]: RuntimeError: Worker failure: [Errno 13] Permission denied: 'gpt-checkpoint/iter_0000010/__13_0.distcp'
|
| 1443 |
+
[rank10]: PermissionError: [Errno 13] Permission denied: 'gpt-checkpoint/iter_0000010/__10_0.distcp'
|
| 1444 |
+
|
| 1445 |
+
[rank10]: The above exception was the direct cause of the following exception:
|
| 1446 |
+
|
| 1447 |
+
[rank10]: Traceback (most recent call last):
|
| 1448 |
+
[rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
|
| 1449 |
+
[rank10]: pretrain(
|
| 1450 |
+
[rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 879, in pretrain
|
| 1451 |
+
[rank10]: save_checkpoint(
|
| 1452 |
+
[rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 469, in save_checkpoint
|
| 1453 |
+
[rank10]: async_save_request = dist_checkpointing.save(state_dict, checkpoint_name, save_strategy,
|
| 1454 |
+
[rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 1455 |
+
[rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 404, in save
|
| 1456 |
+
[rank10]: sharded_strategy.save(sharded_state_dict, checkpoint_dir)
|
| 1457 |
+
[rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/fully_parallel.py", line 95, in save
|
| 1458 |
+
[rank10]: return self.base_strategy.save(sharded_state_dict, checkpoint_dir)
|
| 1459 |
+
[rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 1460 |
+
[rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/base.py", line 228, in save
|
| 1461 |
+
[rank10]: async_calls.maybe_finalize_async_calls(blocking=True)
|
| 1462 |
+
[rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/async_utils.py", line 545, in maybe_finalize_async_calls
|
| 1463 |
+
[rank10]: finalize_fn()
|
| 1464 |
+
[rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 800, in finalize_fn
|
| 1465 |
+
[rank10]: save_state_dict_async_finalize(*save_state_dict_ret)
|
| 1466 |
+
[rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/state_dict_saver.py", line 229, in save_state_dict_async_finalize
|
| 1467 |
+
[rank10]: write_results = storage_writer.retrieve_write_results()
|
| 1468 |
+
[rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 1469 |
+
[rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/filesystem_async.py", line 436, in retrieve_write_results
|
| 1470 |
+
[rank10]: raise RuntimeError(f'Worker failure: {write_results_or_exc}') from write_results_or_exc
|
| 1471 |
+
[rank10]: RuntimeError: Worker failure: [Errno 13] Permission denied: 'gpt-checkpoint/iter_0000010/__10_0.distcp'
|
| 1472 |
+
[rank15]: PermissionError: [Errno 13] Permission denied: 'gpt-checkpoint/iter_0000010/__15_0.distcp'
|
| 1473 |
+
|
| 1474 |
+
[rank15]: The above exception was the direct cause of the following exception:
|
| 1475 |
+
|
| 1476 |
+
[rank15]: Traceback (most recent call last):
|
| 1477 |
+
[rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
|
| 1478 |
+
[rank15]: pretrain(
|
| 1479 |
+
[rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 879, in pretrain
|
| 1480 |
+
[rank15]: save_checkpoint(
|
| 1481 |
+
[rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 469, in save_checkpoint
|
| 1482 |
+
[rank15]: async_save_request = dist_checkpointing.save(state_dict, checkpoint_name, save_strategy,
|
| 1483 |
+
[rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 1484 |
+
[rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 404, in save
|
| 1485 |
+
[rank15]: sharded_strategy.save(sharded_state_dict, checkpoint_dir)
|
| 1486 |
+
[rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/fully_parallel.py", line 95, in save
|
| 1487 |
+
[rank15]: return self.base_strategy.save(sharded_state_dict, checkpoint_dir)
|
| 1488 |
+
[rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 1489 |
+
[rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/base.py", line 228, in save
|
| 1490 |
+
[rank15]: async_calls.maybe_finalize_async_calls(blocking=True)
|
| 1491 |
+
[rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/async_utils.py", line 545, in maybe_finalize_async_calls
|
| 1492 |
+
[rank15]: finalize_fn()
|
| 1493 |
+
[rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 800, in finalize_fn
|
| 1494 |
+
[rank15]: save_state_dict_async_finalize(*save_state_dict_ret)
|
| 1495 |
+
[rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/state_dict_saver.py", line 229, in save_state_dict_async_finalize
|
| 1496 |
+
[rank15]: write_results = storage_writer.retrieve_write_results()
|
| 1497 |
+
[rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 1498 |
+
[rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/filesystem_async.py", line 436, in retrieve_write_results
|
| 1499 |
+
[rank15]: raise RuntimeError(f'Worker failure: {write_results_or_exc}') from write_results_or_exc
|
| 1500 |
+
[rank15]: RuntimeError: Worker failure: [Errno 13] Permission denied: 'gpt-checkpoint/iter_0000010/__15_0.distcp'
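Note: every rank above fails at the same point, while finalizing the asynchronous checkpoint write, and each failure is an EACCES on a `.distcp` shard under gpt-checkpoint/iter_0000010/, which points at write permissions on the save directory rather than at the training step itself. A minimal pre-flight sketch (illustrative only; it assumes the relative gpt-checkpoint path from the launch command, and the helper name is made up):

    import os
    import tempfile

    def checkpoint_dir_writable(path: str = "gpt-checkpoint") -> bool:
        # Try to create the directory and drop a scratch file in it; a
        # PermissionError at either step is the same EACCES the save hit.
        try:
            os.makedirs(path, exist_ok=True)
            with tempfile.NamedTemporaryFile(dir=path):
                pass
            return True
        except PermissionError:
            return False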
|
| 1501 |
+
[rank9]:[W621 22:08:21.261097622 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
|
| 1502 |
+
[rank13]:[W621 22:08:21.341754800 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
|
| 1503 |
+
[rank11]:[W621 22:08:22.522922287 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
|
| 1504 |
+
[rank15]:[W621 22:08:22.784499176 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
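The ProcessGroupNCCL warnings above recommend tearing the default process group down explicitly before the program exits. A minimal sketch of that teardown (not taken from the training script; it only illustrates the torch.distributed call the warning refers to):

    import torch.distributed as dist

    def shutdown_distributed() -> None:
        # Release NCCL communicators explicitly instead of leaking them at
        # interpreter exit, as the warning describes.
        if dist.is_available() and dist.is_initialized():
            dist.destroy_process_group()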
|
| 1505 |
+
W0621 22:08:23.891000 796503 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 796574 closing signal SIGTERM
|
| 1506 |
+
W0621 22:08:23.894000 796503 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 796576 closing signal SIGTERM
|
| 1507 |
+
W0621 22:08:23.898000 796503 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 796577 closing signal SIGTERM
|
| 1508 |
+
W0621 22:08:23.899000 796503 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 796578 closing signal SIGTERM
|
| 1509 |
+
W0621 22:08:23.902000 796503 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 796579 closing signal SIGTERM
|
| 1510 |
+
W0621 22:08:23.903000 796503 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 796580 closing signal SIGTERM
|
| 1511 |
+
W0621 22:08:23.905000 796503 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 796581 closing signal SIGTERM
|
| 1512 |
+
E0621 22:08:25.861000 796503 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 1 (pid: 796575) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
|
| 1513 |
+
Traceback (most recent call last):
|
| 1514 |
+
File "<frozen runpy>", line 198, in _run_module_as_main
|
| 1515 |
+
File "<frozen runpy>", line 88, in _run_code
|
| 1516 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in <module>
|
| 1517 |
+
main()
|
| 1518 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
|
| 1519 |
+
return arg(*args, **kwargs)
|
| 1520 |
+
^^^^^^^^^^^^^^^^^^^^
|
| 1521 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main
|
| 1522 |
+
launch(args)
|
| 1523 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch
|
| 1524 |
+
run(args)
|
| 1525 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run
|
| 1526 |
+
elastic_launch(
|
| 1527 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__
|
| 1528 |
+
return launch_agent(self._config, self._entrypoint, list(args))
|
| 1529 |
+
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 1530 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent
|
| 1531 |
+
raise ChildFailedError(
|
| 1532 |
+
torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
|
| 1533 |
+
============================================================
|
| 1534 |
+
./pretrain_gpt_profile.py FAILED
|
| 1535 |
+
------------------------------------------------------------
|
| 1536 |
+
Failures:
|
| 1537 |
+
<NO_OTHER_FAILURES>
|
| 1538 |
+
------------------------------------------------------------
|
| 1539 |
+
Root Cause (first observed failure):
|
| 1540 |
+
[0]:
|
| 1541 |
+
time : 2025-06-21_22:08:23
|
| 1542 |
+
host : fs-mbz-gpu-188
|
| 1543 |
+
rank : 9 (local_rank: 1)
|
| 1544 |
+
exitcode : 1 (pid: 796575)
|
| 1545 |
+
error_file: <N/A>
|
| 1546 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
| 1547 |
+
============================================================
|
| 1548 |
+
+ set +x
|
| 1549 |
+
W0621 22:08:26.617000 2142274 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2142345 closing signal SIGTERM
|
| 1550 |
+
W0621 22:08:26.619000 2142274 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2142346 closing signal SIGTERM
|
| 1551 |
+
W0621 22:08:26.624000 2142274 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2142347 closing signal SIGTERM
|
| 1552 |
+
W0621 22:08:26.628000 2142274 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2142348 closing signal SIGTERM
|
| 1553 |
+
W0621 22:08:26.632000 2142274 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2142349 closing signal SIGTERM
|
| 1554 |
+
W0621 22:08:26.633000 2142274 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2142350 closing signal SIGTERM
|
| 1555 |
+
W0621 22:08:26.667000 2142274 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2142351 closing signal SIGTERM
|
| 1556 |
+
W0621 22:08:26.673000 2142274 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2142352 closing signal SIGTERM
|
| 1557 |
+
[W621 22:08:29.974772999 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-141]:55442, remote=[fs-mbz-gpu-188]:29500): Broken pipe
|
| 1558 |
+
Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first):
|
| 1559 |
+
frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >) + 0x98 (0x14603c1785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
|
| 1560 |
+
frame #1: <unknown function> + 0x5ba8afe (0x14602545aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 1561 |
+
frame #2: <unknown function> + 0x5baa358 (0x14602545c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 1562 |
+
frame #3: <unknown function> + 0x5babb3e (0x14602545db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 1563 |
+
frame #4: c10d::TCPStore::doWait(c10::ArrayRef<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > >, std::chrono::duration<long, std::ratio<1l, 1000l> >) + 0x1a6 (0x146025457ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 1564 |
+
frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) + 0x33 (0x146025457ea3 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 1565 |
+
frame #6: c10d::TCPStore::get(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) + 0xab (0x146025458f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 1566 |
+
frame #7: <unknown function> + 0xc0f526 (0x14603478b526 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
|
| 1567 |
+
frame #8: <unknown function> + 0x37f17d (0x146033efb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
|
| 1568 |
+
<omitting python frames>
|
| 1569 |
+
frame #26: <unknown function> + 0x29d90 (0x14603d49ad90 in /lib/x86_64-linux-gnu/libc.so.6)
|
| 1570 |
+
frame #27: __libc_start_main + 0x80 (0x14603d49ae40 in /lib/x86_64-linux-gnu/libc.so.6)
|
| 1571 |
+
|
| 1572 |
+
W0621 22:08:29.975000 2142274 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-141_2142274_0' has failed to shutdown the rendezvous '343239' due to an error of type RendezvousConnectionError.
|
| 1573 |
+
[W621 22:08:29.989429258 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-141]:55442, remote=[fs-mbz-gpu-188]:29500): Broken pipe
|
| 1574 |
+
Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first):
|
| 1575 |
+
frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >) + 0x98 (0x14603c1785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
|
| 1576 |
+
frame #1: <unknown function> + 0x5ba8afe (0x14602545aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 1577 |
+
frame #2: <unknown function> + 0x5baa358 (0x14602545c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 1578 |
+
frame #3: <unknown function> + 0x5babb3e (0x14602545db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 1579 |
+
frame #4: c10d::TCPStore::doWait(c10::ArrayRef<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > >, std::chrono::duration<long, std::ratio<1l, 1000l> >) + 0x1a6 (0x146025457ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 1580 |
+
frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) + 0x33 (0x146025457ea3 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 1581 |
+
frame #6: c10d::TCPStore::get(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) + 0xab (0x146025458f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
|
| 1582 |
+
frame #7: <unknown function> + 0xc0f526 (0x14603478b526 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
|
| 1583 |
+
frame #8: <unknown function> + 0x37f17d (0x146033efb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
|
| 1584 |
+
<omitting python frames>
|
| 1585 |
+
frame #26: <unknown function> + 0x29d90 (0x14603d49ad90 in /lib/x86_64-linux-gnu/libc.so.6)
|
| 1586 |
+
frame #27: __libc_start_main + 0x80 (0x14603d49ae40 in /lib/x86_64-linux-gnu/libc.so.6)
|
| 1587 |
+
|
| 1588 |
+
W0621 22:08:29.987000 2142274 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-141_2142274_0' has failed to shutdown the rendezvous '343239' due to an error of type RendezvousConnectionError.
|
| 1589 |
+
Traceback (most recent call last):
|
| 1590 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/elastic/rendezvous/c10d_rendezvous_backend.py", line 117, in _call_store
|
| 1591 |
+
return getattr(self._store, store_op)(*args, **kwargs)
|
| 1592 |
+
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 1593 |
+
torch.distributed.DistNetworkError: failed to recv, got 0 bytes
|
| 1594 |
+
|
| 1595 |
+
The above exception was the direct cause of the following exception:
|
| 1596 |
+
|
| 1597 |
+
Traceback (most recent call last):
|
| 1598 |
+
File "<frozen runpy>", line 198, in _run_module_as_main
|
| 1599 |
+
File "<frozen runpy>", line 88, in _run_code
|
| 1600 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in <module>
|
| 1601 |
+
main()
|
| 1602 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
|
| 1603 |
+
return arg(*args, **kwargs)
|
| 1604 |
+
^^^^^^^^^^^^^^^^^^^^
|
| 1605 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main
|
| 1606 |
+
launch(args)
|
| 1607 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch
|
| 1608 |
+
run(args)
|
| 1609 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run
|
| 1610 |
+
elastic_launch(
|
| 1611 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__
|
| 1612 |
+
return launch_agent(self._config, self._entrypoint, list(args))
|
| 1613 |
+
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 1614 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 261, in launch_agent
|
| 1615 |
+
result = agent.run()
|
| 1616 |
+
^^^^^^^^^^^
|
| 1617 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/elastic/metrics/api.py", line 138, in wrapper
|
| 1618 |
+
result = f(*args, **kwargs)
|
| 1619 |
+
^^^^^^^^^^^^^^^^^^
|
| 1620 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/elastic/agent/server/api.py", line 711, in run
|
| 1621 |
+
result = self._invoke_run(role)
|
| 1622 |
+
^^^^^^^^^^^^^^^^^^^^^^
|
| 1623 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/elastic/agent/server/api.py", line 906, in _invoke_run
|
| 1624 |
+
num_nodes_waiting = rdzv_handler.num_nodes_waiting()
|
| 1625 |
+
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 1626 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py", line 1263, in num_nodes_waiting
|
| 1627 |
+
self._state_holder.sync()
|
| 1628 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py", line 437, in sync
|
| 1629 |
+
get_response = self._backend.get_state()
|
| 1630 |
+
^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 1631 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/elastic/rendezvous/c10d_rendezvous_backend.py", line 75, in get_state
|
| 1632 |
+
base64_state: bytes = self._call_store("get", self._key)
|
| 1633 |
+
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 1634 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/elastic/rendezvous/c10d_rendezvous_backend.py", line 119, in _call_store
|
| 1635 |
+
raise RendezvousConnectionError(
|
| 1636 |
+
torch.distributed.elastic.rendezvous.api.RendezvousConnectionError: The connection to the C10d store has failed. See inner exception for details.
|
| 1637 |
+
+ set +x
|
| 1638 |
+
+ for ctx_length in 1024 2048 4096 8192 12288 16384 24576 32768 40960 49152 65536 81920 98304 131072
|
| 1639 |
+
+ export PROF_CTX_LENGTH=16384
|
| 1640 |
+
+ PROF_CTX_LENGTH=16384
|
| 1641 |
+
+ name='/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L16384*tp2.cp8.bs4.json'
|
| 1642 |
+
+ '[' -f '/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L16384*tp2.cp8.bs4.json' ']'
|
| 1643 |
+
+ echo 'Running ctx_length=16384, TP_SIZE=2, CP_SIZE=8, BATCH_SIZE=4'
|
| 1644 |
+
+ srun bash ./attnserver.sh
|
| 1645 |
+
+ which python3
|
| 1646 |
+
+ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 2 --node_rank 0 --rdzv_id 343239 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-188:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 2 --context-parallel-size 8 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 16384 --max-position-embeddings 16384 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
|
| 1647 |
+
+ which python3
|
| 1648 |
+
+ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 2 --node_rank 1 --rdzv_id 343239 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-188:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 2 --context-parallel-size 8 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 16384 --max-position-embeddings 16384 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
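For reference, this launch uses 2 nodes x 8 processes = 16 ranks; assuming Megatron's usual world-size decomposition, tensor-model-parallel 2 x context-parallel 8 x pipeline-model-parallel 1 accounts for all 16, leaving a data-parallel size of 1, which matches the --micro-batch-size 1 and --global-batch-size 1 settings above.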
|
| 1649 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated
|
| 1650 |
+
and will be removed in future. Use torchrun.
|
| 1651 |
+
Note that --use-env is set by default in torchrun.
|
| 1652 |
+
If your script expects `--local-rank` argument to be set, please
|
| 1653 |
+
change it to read from `os.environ['LOCAL_RANK']` instead. See
|
| 1654 |
+
https://pytorch.org/docs/stable/distributed.html#launch-utility for
|
| 1655 |
+
further instructions
|
| 1656 |
+
|
| 1657 |
+
main()
|
| 1658 |
+
W0621 22:08:33.204000 2145381 site-packages/torch/distributed/run.py:766]
|
| 1659 |
+
W0621 22:08:33.204000 2145381 site-packages/torch/distributed/run.py:766] *****************************************
|
| 1660 |
+
W0621 22:08:33.204000 2145381 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
|
| 1661 |
+
W0621 22:08:33.204000 2145381 site-packages/torch/distributed/run.py:766] *****************************************
|
| 1662 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated
|
| 1663 |
+
and will be removed in future. Use torchrun.
|
| 1664 |
+
Note that --use-env is set by default in torchrun.
|
| 1665 |
+
If your script expects `--local-rank` argument to be set, please
|
| 1666 |
+
change it to read from `os.environ['LOCAL_RANK']` instead. See
|
| 1667 |
+
https://pytorch.org/docs/stable/distributed.html#launch-utility for
|
| 1668 |
+
further instructions
|
| 1669 |
+
|
| 1670 |
+
main()
|
| 1671 |
+
W0621 22:08:33.247000 799671 site-packages/torch/distributed/run.py:766]
|
| 1672 |
+
W0621 22:08:33.247000 799671 site-packages/torch/distributed/run.py:766] *****************************************
|
| 1673 |
+
W0621 22:08:33.247000 799671 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
|
| 1674 |
+
W0621 22:08:33.247000 799671 site-packages/torch/distributed/run.py:766] *****************************************
|
attnserver.run_attnserver.slurm.sh.343239.out.log
CHANGED
|
The diff for this file is too large to render.
See raw diff
|
|
|
attnserver.run_attnserver.slurm.sh.343240.err.log
CHANGED
|
The diff for this file is too large to render.
See raw diff
|
|
|
attnserver.run_attnserver.slurm.sh.343240.out.log
CHANGED
|
The diff for this file is too large to render.
See raw diff
|
|
|
attnserver.run_attnserver.slurm.sh.343243.out.log
CHANGED
|
@@ -19170,3 +19170,88 @@ batch tensor after cp: loss_mask torch.Size([1, 20480])
| 19170 |
batch tensor after cp: attention_mask torch.Size([1, 1, 20480, 81920])
|
| 19171 |
batch tensor after cp: position_ids torch.Size([1, 20480])
|
| 19172 |
Start exporting trace 1
|
| 19173 |
+
Done exporting trace 1
|
| 19174 |
+
[2025-06-21 22:08:05] iteration 2/ 10 | consumed samples: 2 | elapsed time per iteration (ms): 69290.5 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 2147483648.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
|
| 19175 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 19176 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 19177 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 19178 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 19179 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 19180 |
+
batch tensor after cp: tokens torch.Size([1, 20480])
|
| 19181 |
+
batch tensor after cp: labels torch.Size([1, 20480])
|
| 19182 |
+
batch tensor after cp: loss_mask torch.Size([1, 20480])
|
| 19183 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 20480, 81920])
|
| 19184 |
+
batch tensor after cp: position_ids torch.Size([1, 20480])
|
| 19185 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 19186 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 19187 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 19188 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 19189 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 19190 |
+
batch tensor after cp: tokens torch.Size([1, 20480])
|
| 19191 |
+
batch tensor after cp: labels torch.Size([1, 20480])
|
| 19192 |
+
batch tensor after cp: loss_mask torch.Size([1, 20480])
|
| 19193 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 20480, 81920])
|
| 19194 |
+
batch tensor after cp: position_ids torch.Size([1, 20480])
|
| 19195 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 19196 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 19197 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 19198 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 19199 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 19200 |
+
batch tensor after cp: tokens torch.Size([1, 20480])
|
| 19201 |
+
batch tensor after cp: labels torch.Size([1, 20480])
|
| 19202 |
+
batch tensor after cp: loss_mask torch.Size([1, 20480])
|
| 19203 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 20480, 81920])
|
| 19204 |
+
batch tensor after cp: position_ids torch.Size([1, 20480])
|
| 19205 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 19206 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 19207 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 19208 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 19209 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 19210 |
+
batch tensor after cp: tokens torch.Size([1, 20480])
|
| 19211 |
+
batch tensor after cp: labels torch.Size([1, 20480])
|
| 19212 |
+
batch tensor after cp: loss_mask torch.Size([1, 20480])
|
| 19213 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 20480, 81920])
|
| 19214 |
+
batch tensor after cp: position_ids torch.Size([1, 20480])
|
| 19215 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 19216 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 19217 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 19218 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 19219 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 19220 |
+
batch tensor after cp: tokens torch.Size([1, 20480])
|
| 19221 |
+
batch tensor after cp: labels torch.Size([1, 20480])
|
| 19222 |
+
batch tensor after cp: loss_mask torch.Size([1, 20480])
|
| 19223 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 20480, 81920])
|
| 19224 |
+
batch tensor after cp: position_ids torch.Size([1, 20480])
|
| 19225 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 19226 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 19227 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 19228 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 19229 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 19230 |
+
batch tensor after cp: tokens torch.Size([1, 20480])
|
| 19231 |
+
batch tensor after cp: labels torch.Size([1, 20480])
|
| 19232 |
+
batch tensor after cp: loss_mask torch.Size([1, 20480])
|
| 19233 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 20480, 81920])
|
| 19234 |
+
batch tensor after cp: position_ids torch.Size([1, 20480])
|
| 19235 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 19236 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 19237 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 19238 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 19239 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 19240 |
+
batch tensor after cp: tokens torch.Size([1, 20480])
|
| 19241 |
+
batch tensor after cp: labels torch.Size([1, 20480])
|
| 19242 |
+
batch tensor after cp: loss_mask torch.Size([1, 20480])
|
| 19243 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 20480, 81920])
|
| 19244 |
+
batch tensor after cp: position_ids torch.Size([1, 20480])
|
| 19245 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 19246 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 19247 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 19248 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 19249 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 19250 |
+
batch tensor after cp: tokens torch.Size([1, 20480])
|
| 19251 |
+
batch tensor after cp: labels torch.Size([1, 20480])
|
| 19252 |
+
batch tensor after cp: loss_mask torch.Size([1, 20480])
|
| 19253 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 20480, 81920])
|
| 19254 |
+
batch tensor after cp: position_ids torch.Size([1, 20480])
|
| 19255 |
+
Start exporting trace 2
|
| 19256 |
+
Done exporting trace 2
|
| 19257 |
+
[2025-06-21 22:08:40] iteration 3/ 10 | consumed samples: 3 | elapsed time per iteration (ms): 34210.2 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 1073741824.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
|
attnserver.run_attnserver.slurm.sh.343244.err.log
CHANGED
|
@@ -3936,3 +3936,43 @@ W0621 22:07:13.434000 462698 site-packages/torch/distributed/run.py:766]
| 3936 |
W0621 22:07:13.434000 462698 site-packages/torch/distributed/run.py:766] *****************************************
|
| 3937 |
W0621 22:07:13.434000 462698 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
|
| 3938 |
W0621 22:07:13.434000 462698 site-packages/torch/distributed/run.py:766] *****************************************
|
| 3939 |
+
[rank3]:[W621 22:07:35.287546926 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 3] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can specify device_id in init_process_group() to force use of a particular device.
|
| 3940 |
+
[rank7]:[W621 22:07:35.287556554 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 7] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can specify device_id in init_process_group() to force use of a particular device.
|
| 3941 |
+
[rank1]:[W621 22:07:35.287576921 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 1] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can specify device_id in init_process_group() to force use of a particular device.
|
| 3942 |
+
[rank5]:[W621 22:07:35.287600314 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 5] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can specify device_id in init_process_group() to force use of a particular device.
|
| 3943 |
+
[rank2]:[W621 22:07:35.294269895 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 2] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can specify device_id in init_process_group() to force use of a particular device.
|
| 3944 |
+
[rank6]:[W621 22:07:35.294280883 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 6] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can specify device_id in init_process_group() to force use of a particular device.
|
| 3945 |
+
[rank4]:[W621 22:07:35.297098720 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 4] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can specify device_id in init_process_group() to force use of a particular device.
|
| 3946 |
+
[rank0]:[W621 22:07:35.484410809 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 0] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can specify device_id in init_process_group() to force use of a particular device.
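These warnings suggest making the rank-to-GPU mapping explicit when the process group is created. A minimal sketch, assuming a torchrun-style launch that exports LOCAL_RANK (the device_id keyword of init_process_group is only available in newer PyTorch releases):

    import os
    import torch
    import torch.distributed as dist

    local_rank = int(os.environ["LOCAL_RANK"])
    torch.cuda.set_device(local_rank)          # bind this process to its GPU
    dist.init_process_group(
        backend="nccl",
        device_id=torch.device(f"cuda:{local_rank}"),  # make the mapping explicit
    )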
|
| 3947 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 3948 |
+
warnings.warn(
|
| 3949 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 3950 |
+
warnings.warn(
|
| 3951 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 3952 |
+
warnings.warn(
|
| 3953 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 3954 |
+
warnings.warn(
|
| 3955 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 3956 |
+
warnings.warn(
|
| 3957 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 3958 |
+
warnings.warn(
|
| 3959 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 3960 |
+
warnings.warn(
|
| 3961 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 3962 |
+
warnings.warn(
|
| 3963 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 3964 |
+
warnings.warn(
|
| 3965 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 3966 |
+
warnings.warn(
|
| 3967 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 3968 |
+
warnings.warn(
|
| 3969 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 3970 |
+
warnings.warn(
|
| 3971 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 3972 |
+
warnings.warn(
|
| 3973 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 3974 |
+
warnings.warn(
|
| 3975 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 3976 |
+
warnings.warn(
|
| 3977 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 3978 |
+
warnings.warn(
|
attnserver.run_attnserver.slurm.sh.343244.out.log
CHANGED
|
@@ -15010,3 +15010,723 @@ CHECKPOINT_PATH: gpt-checkpoint
| 15010 |
PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
|
| 15011 |
--------------------------------
|
| 15012 |
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
|
| 15013 |
+
using world size: 8, data-parallel size: 1, context-parallel size: 4, hierarchical context-parallel sizes: Nonetensor-model-parallel size: 2, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0
|
| 15014 |
+
Number of virtual stages per pipeline stage: None
|
| 15015 |
+
WARNING: Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used
|
| 15016 |
+
using torch.float16 for parameters ...
|
| 15017 |
+
------------------------ arguments ------------------------
|
| 15018 |
+
account_for_embedding_in_pipeline_split ......... False
|
| 15019 |
+
account_for_loss_in_pipeline_split .............. False
|
| 15020 |
+
accumulate_allreduce_grads_in_fp32 .............. False
|
| 15021 |
+
adam_beta1 ...................................... 0.9
|
| 15022 |
+
adam_beta2 ...................................... 0.999
|
| 15023 |
+
adam_eps ........................................ 1e-08
|
| 15024 |
+
add_bias_linear ................................. True
|
| 15025 |
+
add_position_embedding .......................... True
|
| 15026 |
+
add_qkv_bias .................................... True
|
| 15027 |
+
adlr_autoresume ................................. False
|
| 15028 |
+
adlr_autoresume_interval ........................ 1000
|
| 15029 |
+
align_grad_reduce ............................... True
|
| 15030 |
+
align_param_gather .............................. False
|
| 15031 |
+
app_tag_run_name ................................ None
|
| 15032 |
+
app_tag_run_version ............................. 0.0.0
|
| 15033 |
+
apply_layernorm_1p .............................. False
|
| 15034 |
+
apply_query_key_layer_scaling ................... False
|
| 15035 |
+
apply_residual_connection_post_layernorm ........ False
|
| 15036 |
+
apply_rope_fusion ............................... False
|
| 15037 |
+
async_save ...................................... None
|
| 15038 |
+
async_tensor_model_parallel_allreduce ........... True
|
| 15039 |
+
attention_backend ............................... AttnBackend.auto
|
| 15040 |
+
attention_dropout ............................... 0.1
|
| 15041 |
+
attention_softmax_in_fp32 ....................... False
|
| 15042 |
+
auto_detect_ckpt_format ......................... False
|
| 15043 |
+
barrier_with_L1_time ............................ True
|
| 15044 |
+
bert_binary_head ................................ True
|
| 15045 |
+
bert_embedder_type .............................. megatron
|
| 15046 |
+
bert_load ....................................... None
|
| 15047 |
+
bf16 ............................................ False
|
| 15048 |
+
bias_dropout_fusion ............................. True
|
| 15049 |
+
bias_gelu_fusion ................................ True
|
| 15050 |
+
bias_swiglu_fusion .............................. True
|
| 15051 |
+
biencoder_projection_dim ........................ 0
|
| 15052 |
+
biencoder_shared_query_context_model ............ False
|
| 15053 |
+
block_data_path ................................. None
|
| 15054 |
+
calc_ft_timeouts ................................ False
|
| 15055 |
+
calculate_per_token_loss ........................ False
|
| 15056 |
+
check_for_large_grads ........................... False
|
| 15057 |
+
check_for_nan_in_loss_and_grad .................. False
|
| 15058 |
+
check_for_spiky_loss ............................ False
|
| 15059 |
+
check_weight_hash_across_dp_replicas_interval ... None
|
| 15060 |
+
ckpt_assume_constant_structure .................. False
|
| 15061 |
+
ckpt_convert_format ............................. None
|
| 15062 |
+
ckpt_convert_save ............................... None
|
| 15063 |
+
ckpt_convert_update_legacy_dist_opt_format ...... False
|
| 15064 |
+
ckpt_format ..................................... torch_dist
|
| 15065 |
+
ckpt_fully_parallel_load ........................ False
|
| 15066 |
+
ckpt_fully_parallel_save ........................ True
|
| 15067 |
+
ckpt_fully_parallel_save_deprecated ............. False
|
| 15068 |
+
ckpt_step ....................................... None
|
| 15069 |
+
classes_fraction ................................ 1.0
|
| 15070 |
+
clip_grad ....................................... 1.0
|
| 15071 |
+
clone_scatter_output_in_embedding ............... True
|
| 15072 |
+
config_logger_dir ...............................
|
| 15073 |
+
consumed_train_samples .......................... 0
|
| 15074 |
+
consumed_valid_samples .......................... 0
|
| 15075 |
+
context_parallel_size ........................... 4
|
| 15076 |
+
cp_comm_type .................................... ['p2p']
|
| 15077 |
+
create_attention_mask_in_dataloader ............. True
|
| 15078 |
+
cross_entropy_fusion_impl ....................... native
|
| 15079 |
+
cross_entropy_loss_fusion ....................... False
|
| 15080 |
+
cuda_graph_scope ................................ full
|
| 15081 |
+
cuda_graph_warmup_steps ......................... 3
|
| 15082 |
+
data_args_path .................................. None
|
| 15083 |
+
data_cache_path ................................. None
|
| 15084 |
+
data_parallel_random_init ....................... False
|
| 15085 |
+
data_parallel_sharding_strategy ................. no_shard
|
| 15086 |
+
data_parallel_size .............................. 1
|
| 15087 |
+
data_path ....................................... None
|
| 15088 |
+
data_per_class_fraction ......................... 1.0
|
| 15089 |
+
data_sharding ................................... True
|
| 15090 |
+
dataloader_type ................................. single
|
| 15091 |
+
ddp_average_in_collective ....................... False
|
| 15092 |
+
ddp_bucket_size ................................. None
|
| 15093 |
+
ddp_num_buckets ................................. None
|
| 15094 |
+
ddp_pad_buckets_for_high_nccl_busbw ............. False
|
| 15095 |
+
decoder_first_pipeline_num_layers ............... None
|
| 15096 |
+
decoder_last_pipeline_num_layers ................ None
|
| 15097 |
+
decoder_num_layers .............................. None
|
| 15098 |
+
decoder_seq_length .............................. None
|
| 15099 |
+
decoupled_lr .................................... None
|
| 15100 |
+
decoupled_min_lr ................................ None
|
| 15101 |
+
decrease_batch_size_if_needed ................... False
|
| 15102 |
+
defer_embedding_wgrad_compute ................... False
|
| 15103 |
+
deprecated_use_mcore_models ..................... False
|
| 15104 |
+
deterministic_mode .............................. False
|
| 15105 |
+
dino_bottleneck_size ............................ 256
|
| 15106 |
+
dino_freeze_last_layer .......................... 1
|
| 15107 |
+
dino_head_hidden_size ........................... 2048
|
| 15108 |
+
dino_local_crops_number ......................... 10
|
| 15109 |
+
dino_local_img_size ............................. 96
|
| 15110 |
+
dino_norm_last_layer ............................ False
|
| 15111 |
+
dino_teacher_temp ............................... 0.07
|
| 15112 |
+
dino_warmup_teacher_temp ........................ 0.04
|
| 15113 |
+
dino_warmup_teacher_temp_epochs ................. 30
|
| 15114 |
+
disable_bf16_reduced_precision_matmul ........... False
|
| 15115 |
+
disable_mamba_mem_eff_path ...................... False
|
| 15116 |
+
disable_straggler_on_startup .................... False
|
| 15117 |
+
dist_ckpt_format_deprecated ..................... None
|
| 15118 |
+
dist_ckpt_strictness ............................ assume_ok_unexpected
|
| 15119 |
+
distribute_saved_activations .................... False
|
| 15120 |
+
distributed_backend ............................. nccl
|
| 15121 |
+
distributed_timeout_minutes ..................... 10
|
| 15122 |
+
embedding_path .................................. None
|
| 15123 |
+
empty_unused_memory_level ....................... 0
|
| 15124 |
+
enable_cuda_graph ............................... False
|
| 15125 |
+
enable_ft_package ............................... False
|
| 15126 |
+
enable_gloo_process_groups ...................... True
|
| 15127 |
+
enable_msc ...................................... True
|
| 15128 |
+
enable_one_logger ............................... True
|
| 15129 |
+
encoder_num_layers .............................. 2
|
| 15130 |
+
encoder_pipeline_model_parallel_size ............ 0
|
| 15131 |
+
encoder_seq_length .............................. 65536
|
| 15132 |
+
encoder_tensor_model_parallel_size .............. 0
|
| 15133 |
+
end_weight_decay ................................ 0.1
|
| 15134 |
+
eod_mask_loss ................................... False
|
| 15135 |
+
error_injection_rate ............................ 0
|
| 15136 |
+
error_injection_type ............................ transient_error
|
| 15137 |
+
eval_interval ................................... 16
|
| 15138 |
+
eval_iters ...................................... 1
|
| 15139 |
+
evidence_data_path .............................. None
|
| 15140 |
+
exit_duration_in_mins ........................... None
|
| 15141 |
+
exit_interval ................................... None
|
| 15142 |
+
exit_on_missing_checkpoint ...................... False
|
| 15143 |
+
exit_signal_handler ............................. False
|
| 15144 |
+
exp_avg_dtype ................................... torch.float32
|
| 15145 |
+
exp_avg_sq_dtype ................................ torch.float32
|
| 15146 |
+
expert_model_parallel_size ...................... 1
|
| 15147 |
+
expert_tensor_parallel_size ..................... 2
|
| 15148 |
+
external_cuda_graph ............................. False
|
| 15149 |
+
ffn_hidden_size ................................. 16384
|
| 15150 |
+
finetune ........................................ False
|
| 15151 |
+
first_last_layers_bf16 .......................... False
|
| 15152 |
+
flash_decode .................................... False
|
| 15153 |
+
fp16 ............................................ True
|
| 15154 |
+
fp16_lm_cross_entropy ........................... False
|
| 15155 |
+
fp32_residual_connection ........................ False
|
| 15156 |
+
fp8 ............................................. None
|
| 15157 |
+
fp8_amax_compute_algo ........................... most_recent
|
| 15158 |
+
fp8_amax_history_len ............................ 1
|
| 15159 |
+
fp8_interval .................................... 1
|
| 15160 |
+
fp8_margin ...................................... 0
|
| 15161 |
+
fp8_param_gather ................................ False
|
| 15162 |
+
fp8_recipe ...................................... delayed
|
| 15163 |
+
fp8_wgrad ....................................... True
|
| 15164 |
+
fsdp_double_buffer .............................. False
|
| 15165 |
+
global_batch_size ............................... 1
|
| 15166 |
+
grad_reduce_in_bf16 ............................. False
|
| 15167 |
+
gradient_accumulation_fusion .................... True
|
| 15168 |
+
gradient_reduce_div_fusion ...................... True
|
| 15169 |
+
group_query_attention ........................... True
|
| 15170 |
+
head_lr_mult .................................... 1.0
|
| 15171 |
+
heterogeneous_layers_config_encoded_json ........ None
|
| 15172 |
+
heterogeneous_layers_config_path ................ None
|
| 15173 |
+
hidden_dropout .................................. 0.1
|
| 15174 |
+
hidden_size ..................................... 4096
|
| 15175 |
+
hierarchical_context_parallel_sizes ............. None
|
| 15176 |
+
high_priority_stream_groups ..................... []
|
| 15177 |
+
hybrid_attention_ratio .......................... 0.0
|
| 15178 |
+
hybrid_mlp_ratio ................................ 0.0
|
| 15179 |
+
hybrid_override_pattern ......................... None
|
| 15180 |
+
hysteresis ...................................... 2
|
| 15181 |
+
ict_head_size ................................... None
|
| 15182 |
+
ict_load ........................................ None
|
| 15183 |
+
img_h ........................................... 224
|
| 15184 |
+
img_w ........................................... 224
|
| 15185 |
+
indexer_batch_size .............................. 128
|
| 15186 |
+
indexer_log_interval ............................ 1000
|
| 15187 |
+
inference_batch_times_seqlen_threshold .......... -1
|
| 15188 |
+
inference_dynamic_batching ...................... False
|
| 15189 |
+
inference_dynamic_batching_buffer_guaranteed_fraction 0.2
|
| 15190 |
+
inference_dynamic_batching_buffer_overflow_factor None
|
| 15191 |
+
inference_dynamic_batching_buffer_size_gb ....... 40.0
|
| 15192 |
+
inference_dynamic_batching_chunk_size ........... 256
|
| 15193 |
+
inference_dynamic_batching_max_requests_override None
|
| 15194 |
+
inference_dynamic_batching_max_tokens_override .. None
|
| 15195 |
+
inference_max_batch_size ........................ 8
|
| 15196 |
+
inference_max_seq_length ........................ 2560
|
| 15197 |
+
inference_rng_tracker ........................... False
|
| 15198 |
+
init_method_std ................................. 0.02
|
| 15199 |
+
init_method_xavier_uniform ...................... False
|
| 15200 |
+
init_model_with_meta_device ..................... False
|
| 15201 |
+
initial_loss_scale .............................. 4294967296
|
| 15202 |
+
inprocess_active_world_size ..................... 8
|
| 15203 |
+
inprocess_barrier_timeout ....................... 120
|
| 15204 |
+
inprocess_completion_timeout .................... 120
|
| 15205 |
+
inprocess_empty_cuda_cache ...................... False
|
| 15206 |
+
inprocess_granularity ........................... node
|
| 15207 |
+
inprocess_hard_timeout .......................... 90
|
| 15208 |
+
inprocess_heartbeat_interval .................... 30
|
| 15209 |
+
inprocess_heartbeat_timeout ..................... 60
|
| 15210 |
+
inprocess_last_call_wait ........................ 1
|
| 15211 |
+
inprocess_max_iterations ........................ None
|
| 15212 |
+
inprocess_monitor_process_interval .............. 1.0
|
| 15213 |
+
inprocess_monitor_thread_interval ............... 1.0
|
| 15214 |
+
inprocess_progress_watchdog_interval ............ 1.0
|
| 15215 |
+
inprocess_restart ............................... False
|
| 15216 |
+
inprocess_soft_timeout .......................... 60
|
| 15217 |
+
inprocess_termination_grace_time ................ 1
|
| 15218 |
+
is_hybrid_model ................................. False
|
| 15219 |
+
iter_per_epoch .................................. 1250
|
| 15220 |
+
iterations_to_skip .............................. []
|
| 15221 |
+
keep_fp8_transpose_cache_when_using_custom_fsdp . False
|
| 15222 |
+
kv_channels ..................................... 64
|
| 15223 |
+
kv_lora_rank .................................... 32
|
| 15224 |
+
lazy_mpu_init ................................... None
|
| 15225 |
+
load ............................................ gpt-checkpoint
|
| 15226 |
+
load_model_opt_format ........................... False
|
| 15227 |
+
local_rank ...................................... 0
|
| 15228 |
+
log_interval .................................... 1
|
| 15229 |
+
log_loss_scale_to_tensorboard ................... True
|
| 15230 |
+
log_memory_to_tensorboard ....................... False
|
| 15231 |
+
log_num_zeros_in_grad ........................... False
|
| 15232 |
+
log_params_norm ................................. False
|
| 15233 |
+
log_progress .................................... False
|
| 15234 |
+
log_straggler ................................... False
|
| 15235 |
+
log_throughput .................................. False
|
| 15236 |
+
log_timers_to_tensorboard ....................... False
|
| 15237 |
+
log_validation_ppl_to_tensorboard ............... False
|
| 15238 |
+
log_world_size_to_tensorboard ................... False
|
| 15239 |
+
logging_level ................................... 0
|
| 15240 |
+
loss_scale ...................................... None
|
| 15241 |
+
loss_scale_window ............................... 1000
|
| 15242 |
+
lr .............................................. 0.0005
|
| 15243 |
+
lr_decay_iters .................................. 150000
|
| 15244 |
+
lr_decay_samples ................................ None
|
| 15245 |
+
lr_decay_style .................................. cosine
|
| 15246 |
+
lr_warmup_fraction .............................. None
|
| 15247 |
+
lr_warmup_init .................................. 0.0
|
| 15248 |
+
lr_warmup_iters ................................. 2
|
| 15249 |
+
lr_warmup_samples ............................... 0
|
| 15250 |
+
lr_wsd_decay_iters .............................. None
|
| 15251 |
+
lr_wsd_decay_samples ............................ None
|
| 15252 |
+
lr_wsd_decay_style .............................. exponential
|
| 15253 |
+
main_grads_dtype ................................ torch.float32
|
| 15254 |
+
main_params_dtype ............................... torch.float32
|
| 15255 |
+
make_vocab_size_divisible_by .................... 128
|
| 15256 |
+
mamba_head_dim .................................. 64
|
| 15257 |
+
mamba_num_groups ................................ 8
|
| 15258 |
+
mamba_num_heads ................................. None
|
| 15259 |
+
mamba_state_dim ................................. 128
|
| 15260 |
+
manual_gc ....................................... False
|
| 15261 |
+
manual_gc_eval .................................. True
|
| 15262 |
+
manual_gc_interval .............................. 0
|
| 15263 |
+
mask_factor ..................................... 1.0
|
| 15264 |
+
mask_prob ....................................... 0.15
|
| 15265 |
+
mask_type ....................................... random
|
| 15266 |
+
masked_softmax_fusion ........................... True
|
| 15267 |
+
max_position_embeddings ......................... 65536
|
| 15268 |
+
max_tokens_to_oom ............................... 12000
|
| 15269 |
+
memory_snapshot_path ............................ snapshot.pickle
|
| 15270 |
+
merge_file ...................................... merges.txt
|
| 15271 |
+
micro_batch_size ................................ 1
|
| 15272 |
+
microbatch_group_size_per_vp_stage .............. None
|
| 15273 |
+
mid_level_dataset_surplus ....................... 0.005
|
| 15274 |
+
min_loss_scale .................................. 1.0
|
| 15275 |
+
min_lr .......................................... 0.0
|
| 15276 |
+
mlp_chunks_for_prefill .......................... 1
|
| 15277 |
+
mmap_bin_files .................................. True
|
| 15278 |
+
mock_data ....................................... True
|
| 15279 |
+
moe_apply_probs_on_input ........................ False
|
| 15280 |
+
moe_aux_loss_coeff .............................. 0.0
|
| 15281 |
+
moe_enable_deepep ............................... False
|
| 15282 |
+
moe_expert_capacity_factor ...................... None
|
| 15283 |
+
moe_extended_tp ................................. False
|
| 15284 |
+
moe_ffn_hidden_size ............................. None
|
| 15285 |
+
moe_grouped_gemm ................................ False
|
| 15286 |
+
moe_input_jitter_eps ............................ None
|
| 15287 |
+
moe_layer_freq .................................. 1
|
| 15288 |
+
moe_layer_recompute ............................. False
|
| 15289 |
+
moe_pad_expert_input_to_capacity ................ False
|
| 15290 |
+
moe_per_layer_logging ........................... False
|
| 15291 |
+
moe_permute_fusion .............................. False
|
| 15292 |
+
moe_router_bias_update_rate ..................... 0.001
|
| 15293 |
+
moe_router_dtype ................................ None
|
| 15294 |
+
moe_router_enable_expert_bias ................... False
|
| 15295 |
+
moe_router_force_load_balancing ................. False
|
| 15296 |
+
moe_router_group_topk ........................... None
|
| 15297 |
+
moe_router_load_balancing_type .................. aux_loss
|
| 15298 |
+
moe_router_num_groups ........................... None
|
| 15299 |
+
moe_router_padding_for_fp8 ...................... False
|
| 15300 |
+
moe_router_pre_softmax .......................... False
|
| 15301 |
+
moe_router_score_function ....................... softmax
|
| 15302 |
+
moe_router_topk ................................. 2
|
| 15303 |
+
moe_router_topk_scaling_factor .................. None
|
| 15304 |
+
moe_shared_expert_intermediate_size ............. None
|
| 15305 |
+
moe_shared_expert_overlap ....................... False
|
| 15306 |
+
moe_token_dispatcher_type ....................... allgather
|
| 15307 |
+
moe_token_drop_policy ........................... probs
|
| 15308 |
+
moe_use_legacy_grouped_gemm ..................... False
|
| 15309 |
+
moe_use_upcycling ............................... False
|
| 15310 |
+
moe_z_loss_coeff ................................ None
|
| 15311 |
+
mrope_section ................................... None
|
| 15312 |
+
mscale .......................................... 1.0
|
| 15313 |
+
mscale_all_dim .................................. 1.0
|
| 15314 |
+
mtp_loss_scaling_factor ......................... 0.1
|
| 15315 |
+
mtp_num_layers .................................. None
|
| 15316 |
+
multi_latent_attention .......................... False
|
| 15317 |
+
nccl_all_reduce_for_prefill ..................... False
|
| 15318 |
+
nccl_communicator_config_path ................... None
|
| 15319 |
+
nccl_ub ......................................... False
|
| 15320 |
+
no_load_optim ................................... None
|
| 15321 |
+
no_load_rng ..................................... None
|
| 15322 |
+
no_persist_layer_norm ........................... False
|
| 15323 |
+
no_rope_freq .................................... None
|
| 15324 |
+
no_save_optim ................................... None
|
| 15325 |
+
no_save_rng ..................................... None
|
| 15326 |
+
non_persistent_ckpt_type ........................ None
|
| 15327 |
+
non_persistent_global_ckpt_dir .................. None
|
| 15328 |
+
non_persistent_local_ckpt_algo .................. fully_parallel
|
| 15329 |
+
non_persistent_local_ckpt_dir ................... None
|
| 15330 |
+
non_persistent_save_interval .................... None
|
| 15331 |
+
norm_epsilon .................................... 1e-05
|
| 15332 |
+
normalization ................................... LayerNorm
|
| 15333 |
+
num_attention_heads ............................. 64
|
| 15334 |
+
num_channels .................................... 3
|
| 15335 |
+
num_classes ..................................... 1000
|
| 15336 |
+
num_dataset_builder_threads ..................... 1
|
| 15337 |
+
num_distributed_optimizer_instances ............. 1
|
| 15338 |
+
num_experts ..................................... None
|
| 15339 |
+
num_layers ...................................... 2
|
| 15340 |
+
num_layers_at_end_in_bf16 ....................... 1
|
| 15341 |
+
num_layers_at_start_in_bf16 ..................... 1
|
| 15342 |
+
num_layers_per_virtual_pipeline_stage ........... None
|
| 15343 |
+
num_query_groups ................................ 16
|
| 15344 |
+
num_virtual_stages_per_pipeline_rank ............ None
|
| 15345 |
+
num_workers ..................................... 2
|
| 15346 |
+
object_storage_cache_path ....................... None
|
| 15347 |
+
one_logger_async ................................ False
|
| 15348 |
+
one_logger_project .............................. megatron-lm
|
| 15349 |
+
one_logger_run_name ............................. None
|
| 15350 |
+
onnx_safe ....................................... None
|
| 15351 |
+
openai_gelu ..................................... False
|
| 15352 |
+
optimizer ....................................... adam
|
| 15353 |
+
optimizer_cpu_offload ........................... False
|
| 15354 |
+
optimizer_offload_fraction ...................... 1.0
|
| 15355 |
+
output_bert_embeddings .......................... False
|
| 15356 |
+
overlap_cpu_optimizer_d2h_h2d ................... False
|
| 15357 |
+
overlap_grad_reduce ............................. False
|
| 15358 |
+
overlap_p2p_comm ................................ False
|
| 15359 |
+
overlap_p2p_comm_warmup_flush ................... False
|
| 15360 |
+
overlap_param_gather ............................ False
|
| 15361 |
+
overlap_param_gather_with_optimizer_step ........ False
|
| 15362 |
+
override_opt_param_scheduler .................... False
|
| 15363 |
+
params_dtype .................................... torch.float16
|
| 15364 |
+
patch_dim ....................................... 16
|
| 15365 |
+
per_split_data_args_path ........................ None
|
| 15366 |
+
perform_initialization .......................... True
|
| 15367 |
+
pin_cpu_grads ................................... True
|
| 15368 |
+
pin_cpu_params .................................. True
|
| 15369 |
+
pipeline_model_parallel_comm_backend ............ None
|
| 15370 |
+
pipeline_model_parallel_size .................... 1
|
| 15371 |
+
pipeline_model_parallel_split_rank .............. None
|
| 15372 |
+
position_embedding_type ......................... learned_absolute
|
| 15373 |
+
pretrained_checkpoint ........................... None
|
| 15374 |
+
profile ......................................... False
|
| 15375 |
+
profile_ranks ................................... [0]
|
| 15376 |
+
profile_step_end ................................ 12
|
| 15377 |
+
profile_step_start .............................. 10
|
| 15378 |
+
q_lora_rank ..................................... None
|
| 15379 |
+
qk_head_dim ..................................... 128
|
| 15380 |
+
qk_l2_norm ...................................... False
|
| 15381 |
+
qk_layernorm .................................... False
|
| 15382 |
+
qk_pos_emb_head_dim ............................. 64
|
| 15383 |
+
query_in_block_prob ............................. 0.1
|
| 15384 |
+
rampup_batch_size ............................... None
|
| 15385 |
+
rank ............................................ 0
|
| 15386 |
+
recompute_granularity ........................... None
|
| 15387 |
+
recompute_method ................................ None
|
| 15388 |
+
recompute_modules ............................... None
|
| 15389 |
+
recompute_num_layers ............................ None
|
| 15390 |
+
record_memory_history ........................... False
|
| 15391 |
+
relative_attention_max_distance ................. 128
|
| 15392 |
+
relative_attention_num_buckets .................. 32
|
| 15393 |
+
replication ..................................... False
|
| 15394 |
+
replication_factor .............................. 2
|
| 15395 |
+
replication_jump ................................ None
|
| 15396 |
+
rerun_mode ...................................... disabled
|
| 15397 |
+
reset_attention_mask ............................ False
|
| 15398 |
+
reset_position_ids .............................. False
|
| 15399 |
+
result_rejected_tracker_filename ................ None
|
| 15400 |
+
retriever_report_topk_accuracies ................ []
|
| 15401 |
+
retriever_score_scaling ......................... False
|
| 15402 |
+
retriever_seq_length ............................ 256
|
| 15403 |
+
retro_add_retriever ............................. False
|
| 15404 |
+
retro_attention_gate ............................ 1
|
| 15405 |
+
retro_cyclic_train_iters ........................ None
|
| 15406 |
+
retro_encoder_attention_dropout ................. 0.1
|
| 15407 |
+
retro_encoder_hidden_dropout .................... 0.1
|
| 15408 |
+
retro_encoder_layers ............................ 2
|
| 15409 |
+
retro_num_neighbors ............................. 2
|
| 15410 |
+
retro_num_retrieved_chunks ...................... 2
|
| 15411 |
+
retro_project_dir ............................... None
|
| 15412 |
+
retro_verify_neighbor_count ..................... True
|
| 15413 |
+
rope_scaling_factor ............................. 8.0
|
| 15414 |
+
rotary_base ..................................... 10000
|
| 15415 |
+
rotary_interleaved .............................. False
|
| 15416 |
+
rotary_percent .................................. 1.0
|
| 15417 |
+
rotary_scaling_factor ........................... 1.0
|
| 15418 |
+
rotary_seq_len_interpolation_factor ............. None
|
| 15419 |
+
run_workload_inspector_server ................... False
|
| 15420 |
+
sample_rate ..................................... 1.0
|
| 15421 |
+
save ............................................ gpt-checkpoint
|
| 15422 |
+
save_interval ................................... 16
|
| 15423 |
+
scatter_gather_tensors_in_pipeline .............. True
|
| 15424 |
+
seed ............................................ 1234
|
| 15425 |
+
seq_length ...................................... 65536
|
| 15426 |
+
sequence_parallel ............................... False
|
| 15427 |
+
sgd_momentum .................................... 0.9
|
| 15428 |
+
short_seq_prob .................................. 0.1
|
| 15429 |
+
skip_train ...................................... False
|
| 15430 |
+
skipped_train_samples ........................... 0
|
| 15431 |
+
spec ............................................ None
|
| 15432 |
+
split ........................................... None
|
| 15433 |
+
squared_relu .................................... False
|
| 15434 |
+
start_weight_decay .............................. 0.1
|
| 15435 |
+
straggler_ctrlr_port ............................ 65535
|
| 15436 |
+
straggler_minmax_count .......................... 1
|
| 15437 |
+
suggested_communication_unit_size ............... None
|
| 15438 |
+
swiglu .......................................... False
|
| 15439 |
+
swin_backbone_type .............................. tiny
|
| 15440 |
+
symmetric_ar_type ............................... None
|
| 15441 |
+
te_rng_tracker .................................. False
|
| 15442 |
+
tensor_model_parallel_size ...................... 2
|
| 15443 |
+
tensorboard_dir ................................. tensorboard-logs/
|
| 15444 |
+
tensorboard_log_interval ........................ 1
|
| 15445 |
+
tensorboard_queue_size .......................... 1000
|
| 15446 |
+
test_data_path .................................. None
|
| 15447 |
+
test_mode ....................................... False
|
| 15448 |
+
tiktoken_num_special_tokens ..................... 1000
|
| 15449 |
+
tiktoken_pattern ................................ None
|
| 15450 |
+
tiktoken_special_tokens ......................... None
|
| 15451 |
+
timing_log_level ................................ 0
|
| 15452 |
+
timing_log_option ............................... minmax
|
| 15453 |
+
titles_data_path ................................ None
|
| 15454 |
+
tokenizer_model ................................. None
|
| 15455 |
+
tokenizer_type .................................. GPT2BPETokenizer
|
| 15456 |
+
torch_fsdp2_reshard_after_forward ............... True
|
| 15457 |
+
tp_comm_bootstrap_backend ....................... nccl
|
| 15458 |
+
tp_comm_bulk_dgrad .............................. True
|
| 15459 |
+
tp_comm_bulk_wgrad .............................. True
|
| 15460 |
+
tp_comm_overlap ................................. False
|
| 15461 |
+
tp_comm_overlap_ag .............................. True
|
| 15462 |
+
tp_comm_overlap_cfg ............................. None
|
| 15463 |
+
tp_comm_overlap_rs .............................. True
|
| 15464 |
+
tp_comm_overlap_rs_dgrad ........................ False
|
| 15465 |
+
tp_comm_split_ag ................................ True
|
| 15466 |
+
tp_comm_split_rs ................................ True
|
| 15467 |
+
train_data_path ................................. None
|
| 15468 |
+
train_iters ..................................... 10
|
| 15469 |
+
train_samples ................................... None
|
| 15470 |
+
train_sync_interval ............................. None
|
| 15471 |
+
transformer_impl ................................ transformer_engine
|
| 15472 |
+
transformer_pipeline_model_parallel_size ........ 1
|
| 15473 |
+
untie_embeddings_and_output_weights ............. False
|
| 15474 |
+
use_checkpoint_args ............................. False
|
| 15475 |
+
use_checkpoint_opt_param_scheduler .............. False
|
| 15476 |
+
use_cpu_initialization .......................... None
|
| 15477 |
+
use_custom_fsdp ................................. False
|
| 15478 |
+
use_dist_ckpt ................................... True
|
| 15479 |
+
use_dist_ckpt_deprecated ........................ False
|
| 15480 |
+
use_distributed_optimizer ....................... False
|
| 15481 |
+
use_flash_attn .................................. False
|
| 15482 |
+
use_legacy_models ............................... False
|
| 15483 |
+
use_mp_args_from_checkpoint_args ................ False
|
| 15484 |
+
use_one_sent_docs ............................... False
|
| 15485 |
+
use_persistent_ckpt_worker ...................... False
|
| 15486 |
+
use_precision_aware_optimizer ................... False
|
| 15487 |
+
use_pytorch_profiler ............................ False
|
| 15488 |
+
use_ring_exchange_p2p ........................... False
|
| 15489 |
+
use_rope_scaling ................................ False
|
| 15490 |
+
use_rotary_position_embeddings .................. False
|
| 15491 |
+
use_sharp ....................................... False
|
| 15492 |
+
use_tokenizer_model_from_checkpoint_args ........ True
|
| 15493 |
+
use_torch_fsdp2 ................................. False
|
| 15494 |
+
use_torch_optimizer_for_cpu_offload ............. False
|
| 15495 |
+
use_tp_pp_dp_mapping ............................ False
|
| 15496 |
+
v_head_dim ...................................... 128
|
| 15497 |
+
valid_data_path ................................. None
|
| 15498 |
+
variable_seq_lengths ............................ False
|
| 15499 |
+
virtual_pipeline_model_parallel_size ............ None
|
| 15500 |
+
vision_backbone_type ............................ vit
|
| 15501 |
+
vision_pretraining .............................. False
|
| 15502 |
+
vision_pretraining_type ......................... classify
|
| 15503 |
+
vocab_extra_ids ................................. 0
|
| 15504 |
+
vocab_file ...................................... vocab.json
|
| 15505 |
+
vocab_size ...................................... None
|
| 15506 |
+
wandb_exp_name ..................................
|
| 15507 |
+
wandb_project ...................................
|
| 15508 |
+
wandb_save_dir ..................................
|
| 15509 |
+
weight_decay .................................... 0.1
|
| 15510 |
+
weight_decay_incr_style ......................... constant
|
| 15511 |
+
wgrad_deferral_limit ............................ 0
|
| 15512 |
+
world_size ...................................... 8
|
| 15513 |
+
yaml_cfg ........................................ None
|
| 15514 |
+
-------------------- end of arguments ---------------------
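The parallelism arguments above fully determine the process layout: world_size = 8 decomposes as TP 2 x PP 1 x CP 4 x DP 1. A minimal sketch of that arithmetic, assuming the standard Megatron-LM relations (the variable names below are illustrative, not printed by the run):

# Illustrative sketch: how the argument dump above decomposes world_size = 8.
# The formulas are the usual Megatron-LM relations (assumed, not logged).
world_size = 8
tensor_model_parallel_size = 2
pipeline_model_parallel_size = 1
context_parallel_size = 4

# Data parallelism gets whatever is left after TP / PP / CP.
data_parallel_size = world_size // (
    tensor_model_parallel_size * pipeline_model_parallel_size * context_parallel_size
)
assert data_parallel_size == 1   # matches "data-parallel size: 1"

# Microbatch count per global batch (constant, as logged right below).
global_batch_size = 1
micro_batch_size = 1
num_microbatches = global_batch_size // (micro_batch_size * data_parallel_size)
assert num_microbatches == 1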
|
| 15515 |
+
INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1
|
| 15516 |
+
> building GPT2BPETokenizer tokenizer ...
|
| 15517 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 15518 |
+
> padded vocab (size: 50257) with 175 dummy tokens (new size: 50432)
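The 175 dummy tokens follow from rounding the GPT-2 vocabulary up to a multiple of make_vocab_size_divisible_by x tensor-model-parallel size. A short sketch of that arithmetic, assuming the usual Megatron-LM padding rule (not shown in the log itself):

# Illustrative sketch of the vocab padding reported above (assumed rounding rule).
import math

orig_vocab_size = 50257                # GPT-2 BPE vocabulary
make_vocab_size_divisible_by = 128     # from the argument dump
tensor_model_parallel_size = 2

multiple = make_vocab_size_divisible_by * tensor_model_parallel_size   # 256
padded_vocab_size = math.ceil(orig_vocab_size / multiple) * multiple

assert padded_vocab_size == 50432
assert padded_vocab_size - orig_vocab_size == 175   # the "175 dummy tokens"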
|
| 15519 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 15520 |
+
WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED
|
| 15521 |
+
> initializing torch distributed ...
|
| 15522 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 15523 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 15524 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 15525 |
+
WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written.
|
| 15526 |
+
WARNING: one_logger package is required to enable e2e metrics tracking. please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it
|
| 15527 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 15528 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 15529 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 15530 |
+
> initialized tensor model parallel with size 2
|
| 15531 |
+
> initialized pipeline model parallel with size 1
|
| 15532 |
+
> setting random seeds to 1234 ...
|
| 15533 |
+
> compiling dataset index builder ...
|
| 15534 |
+
make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets'
|
| 15535 |
+
make: Nothing to be done for 'default'.
|
| 15536 |
+
make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets'
|
| 15537 |
+
>>> done with dataset index builder. Compilation time: 0.043 seconds
|
| 15538 |
+
WARNING: constraints for invoking optimized fused softmax kernel are not met. We default back to unfused kernel invocations.
|
| 15539 |
+
> compiling and loading fused kernels ...
|
| 15540 |
+
>>> done with compiling and loading fused kernels. Compilation time: 2.203 seconds
|
| 15541 |
+
time to initialize megatron (seconds): 8.105
|
| 15542 |
+
[after megatron is initialized] datetime: 2025-06-21 22:07:42
|
| 15543 |
+
building GPT model ...
|
| 15544 |
+
>>> embedding
|
| 15545 |
+
>>> decoder
|
| 15546 |
+
>>> output_layer
|
| 15547 |
+
> number of parameters on (tensor, pipeline) model parallel rank (0, 0): 547960832
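The 547,960,832 parameters per tensor-parallel rank can be reproduced from the argument dump. A back-of-the-envelope sketch, assuming tied input/output embeddings (untie_embeddings_and_output_weights is False), vocab-parallel word embeddings, and the usual column-/row-parallel bias placement; this is an illustration, not code from the run:

# Rough per-TP-rank parameter count for the 2-layer GPT configured above.
hidden, ffn, layers = 4096, 16384, 2
seq = 65536                              # max_position_embeddings (learned absolute)
padded_vocab, tp = 50432, 2
heads, groups, kv_ch = 64, 16, 64        # GQA: 64 query heads, 16 KV groups

word_emb = padded_vocab * hidden // tp   # vocab-parallel word embedding
pos_emb = seq * hidden                   # position embedding, replicated across TP

qkv_out = (heads + 2 * groups) * kv_ch   # 6144 fused QKV output rows
per_layer = (
    hidden * qkv_out // tp + qkv_out // tp   # QKV weight + bias (column-parallel)
    + 2 * hidden                             # QKV input LayerNorm weight + bias
    + hidden * hidden // tp + hidden         # attention proj weight + bias (row-parallel)
    + hidden * ffn // tp + ffn // tp         # MLP fc1 weight + bias (column-parallel)
    + 2 * hidden                             # fc1 input LayerNorm weight + bias
    + ffn * hidden // tp + hidden            # MLP fc2 weight + bias (row-parallel)
)

total = word_emb + pos_emb + layers * per_layer + 2 * hidden   # + final LayerNorm
assert total == 547_960_832              # matches the line above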
|
| 15548 |
+
>>> embedding
|
| 15549 |
+
>>> decoder
|
| 15550 |
+
>>> output_layer
|
| 15551 |
+
> number of parameters on (tensor, pipeline) model parallel rank (0, 0): 547960832
|
| 15552 |
+
INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False)
|
| 15553 |
+
INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1
|
| 15554 |
+
Params for bucket 1 (547960832 elements, 547960832 padded size):
|
| 15555 |
+
module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias
|
| 15556 |
+
module.embedding.position_embeddings.weight
|
| 15557 |
+
module.decoder.layers.1.mlp.linear_fc1.bias
|
| 15558 |
+
module.decoder.layers.0.mlp.linear_fc2.weight
|
| 15559 |
+
module.decoder.layers.0.mlp.linear_fc1.bias
|
| 15560 |
+
module.embedding.word_embeddings.weight
|
| 15561 |
+
module.decoder.final_layernorm.bias
|
| 15562 |
+
module.decoder.layers.1.self_attention.linear_qkv.weight
|
| 15563 |
+
module.decoder.layers.1.self_attention.linear_proj.weight
|
| 15564 |
+
module.decoder.layers.0.self_attention.linear_qkv.bias
|
| 15565 |
+
module.decoder.layers.1.mlp.linear_fc2.weight
|
| 15566 |
+
module.decoder.layers.1.self_attention.linear_proj.bias
|
| 15567 |
+
module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias
|
| 15568 |
+
module.decoder.final_layernorm.weight
|
| 15569 |
+
module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias
|
| 15570 |
+
module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias
|
| 15571 |
+
module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight
|
| 15572 |
+
module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight
|
| 15573 |
+
module.decoder.layers.1.self_attention.linear_qkv.bias
|
| 15574 |
+
module.decoder.layers.0.mlp.linear_fc2.bias
|
| 15575 |
+
module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight
|
| 15576 |
+
module.decoder.layers.1.mlp.linear_fc1.weight
|
| 15577 |
+
module.decoder.layers.0.mlp.linear_fc1.weight
|
| 15578 |
+
module.decoder.layers.0.self_attention.linear_proj.weight
|
| 15579 |
+
module.decoder.layers.1.mlp.linear_fc2.bias
|
| 15580 |
+
module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight
|
| 15581 |
+
module.decoder.layers.0.self_attention.linear_qkv.weight
|
| 15582 |
+
module.decoder.layers.0.self_attention.linear_proj.bias
|
| 15583 |
+
INFO:megatron.core.optimizer:Setting up optimizer with config OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=<megatron.core.timers.Timers object at 0x14648e78ab40>, config_logger_dir='')
|
| 15584 |
+
INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine
|
| 15585 |
+
>>> embedding
|
| 15586 |
+
>>> decoder
|
| 15587 |
+
>>> output_layer
|
| 15588 |
+
> number of parameters on (tensor, pipeline) model parallel rank (1, 0): 547960832
|
| 15589 |
+
>>> embedding
|
| 15590 |
+
>>> decoder
|
| 15591 |
+
>>> output_layer
|
| 15592 |
+
> number of parameters on (tensor, pipeline) model parallel rank (1, 0): 547960832
|
| 15593 |
+
>>> embedding
|
| 15594 |
+
>>> decoder
|
| 15595 |
+
>>> output_layer
|
| 15596 |
+
> number of parameters on (tensor, pipeline) model parallel rank (1, 0): 547960832
|
| 15597 |
+
>>> embedding
|
| 15598 |
+
>>> decoder
|
| 15599 |
+
>>> output_layer
|
| 15600 |
+
>>> embedding
|
| 15601 |
+
>>> decoder
|
| 15602 |
+
>>> output_layer
|
| 15603 |
+
> number of parameters on (tensor, pipeline) model parallel rank (0, 0): 547960832
|
| 15604 |
+
> number of parameters on (tensor, pipeline) model parallel rank (1, 0): 547960832
|
| 15605 |
+
>>> embedding
|
| 15606 |
+
>>> decoder
|
| 15607 |
+
>>> output_layer
|
| 15608 |
+
> number of parameters on (tensor, pipeline) model parallel rank (0, 0): 547960832
|
| 15609 |
+
WARNING: could not find the metadata file gpt-checkpoint/latest_checkpointed_iteration.txt
|
| 15610 |
+
will not load any checkpoints and will start from random
|
| 15611 |
+
(min, max) time across ranks (ms):
|
| 15612 |
+
load-checkpoint ................................: (3.34, 3.61)
|
| 15613 |
+
[after model, optimizer, and learning rate scheduler are built] datetime: 2025-06-21 22:07:45
|
| 15614 |
+
> building train, validation, and test datasets ...
|
| 15615 |
+
> datasets target sizes (minimum size):
|
| 15616 |
+
train: 10
|
| 15617 |
+
validation: 1
|
| 15618 |
+
test: 1
|
| 15619 |
+
INFO:megatron.core.datasets.blended_megatron_dataset_config:Let mock = True, as both blend and blend_per_split are None
|
| 15620 |
+
INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split = 1,1,1, an arbitrarily even split, as mock is True
|
| 15621 |
+
INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split_matrix = [(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)]
|
| 15622 |
+
> building train, validation, and test datasets for GPT ...
|
| 15623 |
+
INFO:megatron.core.datasets.blended_megatron_dataset_builder:Building MockGPTDataset splits with sizes=(10, 1, 1) and config=GPTDatasetConfig(random_seed=1234, sequence_length=65536, blend=None, blend_per_split=None, split='1,1,1', split_matrix=[(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)], num_dataset_builder_threads=1, path_to_cache=None, mmap_bin_files=True, mock=True, tokenizer=<megatron.training.tokenizer.tokenizer._GPT2BPETokenizer object at 0x146492c8d430>, mid_level_dataset_surplus=0.005, reset_position_ids=False, reset_attention_mask=False, eod_mask_loss=False, create_attention_mask=True, drop_last_partial_validation_sequence=True, add_extra_token_to_sequence=True, object_storage_cache_path=None)
|
| 15624 |
+
INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset train indices
|
| 15625 |
+
DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False
|
| 15626 |
+
WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None
|
| 15627 |
+
DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.005054 seconds
|
| 15628 |
+
INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 1040
|
| 15629 |
+
INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1
|
| 15630 |
+
INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset valid indices
|
| 15631 |
+
DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False
|
| 15632 |
+
WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None
|
| 15633 |
+
DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.001612 seconds
|
| 15634 |
+
INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 1040
|
| 15635 |
+
INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1
|
| 15636 |
+
INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset test indices
|
| 15637 |
+
DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False
|
| 15638 |
+
WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None
|
| 15639 |
+
DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.001329 seconds
|
| 15640 |
+
INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 1041
|
| 15641 |
+
INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1
|
| 15642 |
+
> finished creating GPT datasets ...
|
| 15643 |
+
[after dataloaders are built] datetime: 2025-06-21 22:07:45
|
| 15644 |
+
done with setup ...
|
| 15645 |
+
training ...
|
| 15646 |
+
(min, max) time across ranks (ms):
|
| 15647 |
+
model-and-optimizer-setup ......................: (3145.17, 3164.84)
|
| 15648 |
+
train/valid/test-data-iterators-setup ..........: (20.84, 151.43)
|
| 15649 |
+
Setting rerun_state_machine.current_iteration to 0...
|
| 15650 |
+
[before the start of training step] datetime: 2025-06-21 22:07:45
|
| 15651 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 15652 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 15653 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 15654 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 15655 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 15656 |
+
batch tensor after cp: tokens torch.Size([2, 32768])
|
| 15657 |
+
batch tensor after cp: labels torch.Size([2, 32768])
|
| 15658 |
+
batch tensor after cp: loss_mask torch.Size([2, 32768])
|
| 15659 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 32768, 131072])
|
| 15660 |
+
batch tensor after cp: position_ids torch.Size([2, 32768])
|
| 15661 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 15662 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 15663 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 15664 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 15665 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 15666 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 15667 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 15668 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 15669 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 15670 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 15671 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 15672 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 15673 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 15674 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 15675 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 15676 |
+
batch tensor after cp: tokens torch.Size([2, 32768])
|
| 15677 |
+
batch tensor after cp: labels torch.Size([2, 32768])
|
| 15678 |
+
batch tensor after cp: loss_mask torch.Size([2, 32768])
|
| 15679 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 32768, 131072])
|
| 15680 |
+
batch tensor after cp: position_ids torch.Size([2, 32768])
|
| 15681 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 15682 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 15683 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 15684 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 15685 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 15686 |
+
batch tensor after cp: tokens torch.Size([2, 32768])
|
| 15687 |
+
batch tensor after cp: labels torch.Size([2, 32768])
|
| 15688 |
+
batch tensor after cp: loss_mask torch.Size([2, 32768])
|
| 15689 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 32768, 131072])
|
| 15690 |
+
batch tensor after cp: position_ids torch.Size([2, 32768])
|
| 15691 |
+
batch tensor after cp: tokens torch.Size([2, 32768])
|
| 15692 |
+
batch tensor after cp: labels torch.Size([2, 32768])
|
| 15693 |
+
batch tensor after cp: loss_mask torch.Size([2, 32768])
|
| 15694 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 32768, 131072])
|
| 15695 |
+
batch tensor after cp: position_ids torch.Size([2, 32768])
|
| 15696 |
+
batch tensor after cp: tokens torch.Size([2, 32768])
|
| 15697 |
+
batch tensor after cp: labels torch.Size([2, 32768])
|
| 15698 |
+
batch tensor after cp: loss_mask torch.Size([2, 32768])
|
| 15699 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 32768, 131072])
|
| 15700 |
+
batch tensor after cp: position_ids torch.Size([2, 32768])
|
| 15701 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 15702 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 15703 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 15704 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 15705 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 15706 |
+
batch tensor after cp: tokens torch.Size([2, 32768])
|
| 15707 |
+
batch tensor after cp: labels torch.Size([2, 32768])
|
| 15708 |
+
batch tensor after cp: loss_mask torch.Size([2, 32768])
|
| 15709 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 32768, 131072])
|
| 15710 |
+
batch tensor after cp: position_ids torch.Size([2, 32768])
|
| 15711 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 15712 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 15713 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 15714 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 15715 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 15716 |
+
batch tensor after cp: tokens torch.Size([2, 32768])
|
| 15717 |
+
batch tensor after cp: labels torch.Size([2, 32768])
|
| 15718 |
+
batch tensor after cp: loss_mask torch.Size([2, 32768])
|
| 15719 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 32768, 131072])
|
| 15720 |
+
batch tensor after cp: position_ids torch.Size([2, 32768])
|
| 15721 |
+
batch tensor: tokens torch.Size([2, 131072])
|
| 15722 |
+
batch tensor: labels torch.Size([2, 131072])
|
| 15723 |
+
batch tensor: loss_mask torch.Size([2, 131072])
|
| 15724 |
+
batch tensor: attention_mask torch.Size([2, 1, 131072, 131072])
|
| 15725 |
+
batch tensor: position_ids torch.Size([2, 131072])
|
| 15726 |
+
batch tensor after cp: tokens torch.Size([2, 32768])
|
| 15727 |
+
batch tensor after cp: labels torch.Size([2, 32768])
|
| 15728 |
+
batch tensor after cp: loss_mask torch.Size([2, 32768])
|
| 15729 |
+
batch tensor after cp: attention_mask torch.Size([2, 1, 32768, 131072])
|
| 15730 |
+
batch tensor after cp: position_ids torch.Size([2, 32768])
|
| 15731 |
+
Start exporting trace 0
|
| 15732 |
+
Done exporting trace 0
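The "batch tensor" / "batch tensor after cp" pairs above show context parallelism sharding the sequence dimension by context_parallel_size = 4 (131072 -> 32768 per CP rank) while the attention mask keeps its full key length. A minimal sketch of that kind of slicing, with a scaled-down sequence length so the full mask fits in memory; the run's cp_comm_type ['p2p'] path also interleaves chunks for load balancing, so this reproduces the logged shapes, not the exact token layout:

# Minimal sketch: shard a batch along the sequence dimension for context parallelism.
import torch

cp_size, cp_rank = 4, 0
seq_len = 4096                        # the job itself uses 131072 -> 32768 per rank
chunk = seq_len // cp_size

batch = {
    "tokens": torch.zeros(2, seq_len, dtype=torch.long),
    "labels": torch.zeros(2, seq_len, dtype=torch.long),
    "loss_mask": torch.ones(2, seq_len),
    "attention_mask": torch.ones(2, 1, seq_len, seq_len, dtype=torch.bool),
    "position_ids": torch.arange(seq_len).repeat(2, 1),
}

sl = slice(cp_rank * chunk, (cp_rank + 1) * chunk)
after_cp = {
    # queries are sharded; the key dimension of the mask stays full length
    k: (v[:, :, sl, :] if k == "attention_mask" else v[..., sl])
    for k, v in batch.items()
}

for k, v in after_cp.items():
    print("batch tensor after cp:", k, tuple(v.shape))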
|
attnserver.run_attnserver.slurm.sh.343248.err.log
CHANGED
|
@@ -1979,3 +1979,671 @@ W0621 22:07:02.709000 2401561 site-packages/torch/distributed/run.py:766] ******
|
|
| 1979 |
[rank2]:[W621 22:07:25.571074360 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 2] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 1980 |
[rank4]:[W621 22:07:25.571438088 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 4] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 1981 |
[rank0]:[W621 22:07:25.706691733 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 0] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 1982 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 1983 |
+
warnings.warn(
|
| 1984 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 1985 |
+
warnings.warn(
|
| 1986 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 1987 |
+
warnings.warn(
|
| 1988 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 1989 |
+
warnings.warn(
|
| 1990 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 1991 |
+
warnings.warn(
|
| 1992 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 1993 |
+
warnings.warn(
|
| 1994 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 1995 |
+
warnings.warn(
|
| 1996 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 1997 |
+
warnings.warn(
|
| 1998 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 1999 |
+
warnings.warn(
|
| 2000 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 2001 |
+
warnings.warn(
|
| 2002 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 2003 |
+
warnings.warn(
|
| 2004 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 2005 |
+
warnings.warn(
|
| 2006 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 2007 |
+
warnings.warn(
|
| 2008 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 2009 |
+
warnings.warn(
|
| 2010 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 2011 |
+
warnings.warn(
|
| 2012 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 2013 |
+
warnings.warn(
|
| 2014 |
+
[rank3]: Traceback (most recent call last):
|
| 2015 |
+
[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
|
| 2016 |
+
[rank3]: pretrain(
|
| 2017 |
+
[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain
|
| 2018 |
+
[rank3]: iteration, num_floating_point_operations_so_far = train(
|
| 2019 |
+
[rank3]: ^^^^^^
|
| 2020 |
+
[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train
|
| 2021 |
+
[rank3]: ) = train_step(
|
| 2022 |
+
[rank3]: ^^^^^^^^^^^
|
| 2023 |
+
[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step
|
| 2024 |
+
[rank3]: losses_reduced = forward_backward_func(
|
| 2025 |
+
[rank3]: ^^^^^^^^^^^^^^^^^^^^^^
|
| 2026 |
+
[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining
|
| 2027 |
+
[rank3]: output_tensor, num_tokens = forward_step(
|
| 2028 |
+
[rank3]: ^^^^^^^^^^^^^
|
| 2029 |
+
[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step
|
| 2030 |
+
[rank3]: output_tensor, loss_func = forward_step_func(data_iterator, model)
|
| 2031 |
+
[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 2032 |
+
[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step
|
| 2033 |
+
[rank3]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)
|
| 2034 |
+
[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^
|
| 2035 |
+
[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch
|
| 2036 |
+
[rank3]: batch = next(global_batches)
|
| 2037 |
+
[rank3]: ^^^^^^^^^^^^^^^^^^^^
|
| 2038 |
+
[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches
|
| 2039 |
+
[rank3]: attention_mask = torch.ones(
|
| 2040 |
+
[rank3]: ^^^^^^^^^^^
|
| 2041 |
+
[rank3]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 18432.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 134.86 GiB is free. Including non-PyTorch memory, this process has 4.95 GiB memory in use. Of the allocated memory 3.30 GiB is allocated by PyTorch, and 193.54 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
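For context on the failure above: the 18432.00 GiB request comes from materializing a dense attention mask, whose footprint grows quadratically with the number of tokens it covers, so no allocator setting (including the expandable_segments hint in the message) can satisfy it on a 139.81 GiB device. The exact tensor shape built by setup_batches is not visible in this log, so the following is only a rough sketch, assuming a [batch, 1, seq, seq] boolean mask:

# Rough sketch (assumption: dense bool mask of shape [batch, 1, seq, seq]).
def dense_mask_gib(batch: int, seq_len: int, bytes_per_elem: int = 1) -> float:
    # torch.ones(batch, 1, seq, seq, dtype=torch.bool) stores one byte per element.
    return batch * seq_len * seq_len * bytes_per_elem / 2**30

for seq in (32_768, 131_072):
    print(f"seq={seq}: {dense_mask_gib(batch=1, seq_len=seq):,.0f} GiB per sample")
# seq=32768 -> 1 GiB per sample, seq=131072 -> 16 GiB per sample. The requests
# logged here (18432 GiB, 32768 GiB) imply additional multiplicative factors on
# top of this per-sample cost, but the quadratic seq*seq term is the dominant
# driver, and the allocation fails inside get_batch, before the first forward pass.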
|
| 2042 |
+
[rank5]: Traceback (most recent call last):
|
| 2043 |
+
[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
|
| 2044 |
+
[rank5]: pretrain(
|
| 2045 |
+
[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain
|
| 2046 |
+
[rank5]: iteration, num_floating_point_operations_so_far = train(
|
| 2047 |
+
[rank5]: ^^^^^^
|
| 2048 |
+
[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train
|
| 2049 |
+
[rank5]: ) = train_step(
|
| 2050 |
+
[rank5]: ^^^^^^^^^^^
|
| 2051 |
+
[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step
|
| 2052 |
+
[rank5]: losses_reduced = forward_backward_func(
|
| 2053 |
+
[rank5]: ^^^^^^^^^^^^^^^^^^^^^^
|
| 2054 |
+
[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining
|
| 2055 |
+
[rank5]: output_tensor, num_tokens = forward_step(
|
| 2056 |
+
[rank5]: ^^^^^^^^^^^^^
|
| 2057 |
+
[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step
|
| 2058 |
+
[rank5]: output_tensor, loss_func = forward_step_func(data_iterator, model)
|
| 2059 |
+
[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 2060 |
+
[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step
|
| 2061 |
+
[rank5]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)
|
| 2062 |
+
[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^
|
| 2063 |
+
[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch
|
| 2064 |
+
[rank5]: batch = next(global_batches)
|
| 2065 |
+
[rank5]: ^^^^^^^^^^^^^^^^^^^^
|
| 2066 |
+
[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches
|
| 2067 |
+
[rank5]: attention_mask = torch.ones(
|
| 2068 |
+
[rank5]: ^^^^^^^^^^^
|
| 2069 |
+
[rank5]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 18432.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 134.86 GiB is free. Including non-PyTorch memory, this process has 4.95 GiB memory in use. Of the allocated memory 3.30 GiB is allocated by PyTorch, and 193.54 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
|
| 2070 |
+
[rank4]: Traceback (most recent call last):
|
| 2071 |
+
[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
|
| 2072 |
+
[rank4]: pretrain(
|
| 2073 |
+
[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain
|
| 2074 |
+
[rank4]: iteration, num_floating_point_operations_so_far = train(
|
| 2075 |
+
[rank4]: ^^^^^^
|
| 2076 |
+
[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train
|
| 2077 |
+
[rank4]: ) = train_step(
|
| 2078 |
+
[rank4]: ^^^^^^^^^^^
|
| 2079 |
+
[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step
|
| 2080 |
+
[rank4]: losses_reduced = forward_backward_func(
|
| 2081 |
+
[rank4]: ^^^^^^^^^^^^^^^^^^^^^^
|
| 2082 |
+
[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining
|
| 2083 |
+
[rank4]: output_tensor, num_tokens = forward_step(
|
| 2084 |
+
[rank4]: ^^^^^^^^^^^^^
|
| 2085 |
+
[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step
|
| 2086 |
+
[rank4]: output_tensor, loss_func = forward_step_func(data_iterator, model)
|
| 2087 |
+
[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 2088 |
+
[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step
|
| 2089 |
+
[rank4]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)
|
| 2090 |
+
[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^
|
| 2091 |
+
[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch
|
| 2092 |
+
[rank4]: batch = next(global_batches)
|
| 2093 |
+
[rank4]: ^^^^^^^^^^^^^^^^^^^^
|
| 2094 |
+
[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches
|
| 2095 |
+
[rank4]: attention_mask = torch.ones(
|
| 2096 |
+
[rank4]: ^^^^^^^^^^^
|
| 2097 |
+
[rank4]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 18432.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 134.86 GiB is free. Including non-PyTorch memory, this process has 4.95 GiB memory in use. Of the allocated memory 3.30 GiB is allocated by PyTorch, and 193.54 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
|
| 2098 |
+
[rank2]: Traceback (most recent call last):
|
| 2099 |
+
[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
|
| 2100 |
+
[rank2]: pretrain(
|
| 2101 |
+
[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain
|
| 2102 |
+
[rank2]: iteration, num_floating_point_operations_so_far = train(
|
| 2103 |
+
[rank2]: ^^^^^^
|
| 2104 |
+
[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train
|
| 2105 |
+
[rank2]: ) = train_step(
|
| 2106 |
+
[rank2]: ^^^^^^^^^^^
|
| 2107 |
+
[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step
|
| 2108 |
+
[rank2]: losses_reduced = forward_backward_func(
|
| 2109 |
+
[rank2]: ^^^^^^^^^^^^^^^^^^^^^^
|
| 2110 |
+
[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining
|
| 2111 |
+
[rank2]: output_tensor, num_tokens = forward_step(
|
| 2112 |
+
[rank2]: ^^^^^^^^^^^^^
|
| 2113 |
+
[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step
|
| 2114 |
+
[rank2]: output_tensor, loss_func = forward_step_func(data_iterator, model)
|
| 2115 |
+
[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 2116 |
+
[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step
|
| 2117 |
+
[rank2]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)
|
| 2118 |
+
[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^
|
| 2119 |
+
[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch
|
| 2120 |
+
[rank2]: batch = next(global_batches)
|
| 2121 |
+
[rank2]: ^^^^^^^^^^^^^^^^^^^^
|
| 2122 |
+
[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches
|
| 2123 |
+
[rank2]: attention_mask = torch.ones(
|
| 2124 |
+
[rank2]: ^^^^^^^^^^^
|
| 2125 |
+
[rank2]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 18432.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 134.86 GiB is free. Including non-PyTorch memory, this process has 4.95 GiB memory in use. Of the allocated memory 3.30 GiB is allocated by PyTorch, and 193.54 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
|
| 2126 |
+
[rank1]: Traceback (most recent call last):
|
| 2127 |
+
[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
|
| 2128 |
+
[rank1]: pretrain(
|
| 2129 |
+
[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain
|
| 2130 |
+
[rank1]: iteration, num_floating_point_operations_so_far = train(
|
| 2131 |
+
[rank1]: ^^^^^^
|
| 2132 |
+
[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train
|
| 2133 |
+
[rank1]: ) = train_step(
|
| 2134 |
+
[rank1]: ^^^^^^^^^^^
|
| 2135 |
+
[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step
|
| 2136 |
+
[rank1]: losses_reduced = forward_backward_func(
|
| 2137 |
+
[rank1]: ^^^^^^^^^^^^^^^^^^^^^^
|
| 2138 |
+
[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining
|
| 2139 |
+
[rank1]: output_tensor, num_tokens = forward_step(
|
| 2140 |
+
[rank1]: ^^^^^^^^^^^^^
|
| 2141 |
+
[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step
|
| 2142 |
+
[rank1]: output_tensor, loss_func = forward_step_func(data_iterator, model)
|
| 2143 |
+
[rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 2144 |
+
[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step
|
| 2145 |
+
[rank1]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)
|
| 2146 |
+
[rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^
|
| 2147 |
+
[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch
|
| 2148 |
+
[rank1]: batch = next(global_batches)
|
| 2149 |
+
[rank1]: ^^^^^^^^^^^^^^^^^^^^
|
| 2150 |
+
[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches
|
| 2151 |
+
[rank1]: attention_mask = torch.ones(
|
| 2152 |
+
[rank1]: ^^^^^^^^^^^
|
| 2153 |
+
[rank1]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 18432.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 134.86 GiB is free. Including non-PyTorch memory, this process has 4.95 GiB memory in use. Of the allocated memory 3.30 GiB is allocated by PyTorch, and 193.54 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
|
| 2154 |
+
[rank0]: Traceback (most recent call last):
|
| 2155 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
|
| 2156 |
+
[rank0]: pretrain(
|
| 2157 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain
|
| 2158 |
+
[rank0]: iteration, num_floating_point_operations_so_far = train(
|
| 2159 |
+
[rank0]: ^^^^^^
|
| 2160 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train
|
| 2161 |
+
[rank0]: ) = train_step(
|
| 2162 |
+
[rank0]: ^^^^^^^^^^^
|
| 2163 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step
|
| 2164 |
+
[rank0]: losses_reduced = forward_backward_func(
|
| 2165 |
+
[rank0]: ^^^^^^^^^^^^^^^^^^^^^^
|
| 2166 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining
|
| 2167 |
+
[rank0]: output_tensor, num_tokens = forward_step(
|
| 2168 |
+
[rank0]: ^^^^^^^^^^^^^
|
| 2169 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step
|
| 2170 |
+
[rank0]: output_tensor, loss_func = forward_step_func(data_iterator, model)
|
| 2171 |
+
[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 2172 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step
|
| 2173 |
+
[rank0]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)
|
| 2174 |
+
[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^
|
| 2175 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch
|
| 2176 |
+
[rank0]: batch = next(global_batches)
|
| 2177 |
+
[rank0]: ^^^^^^^^^^^^^^^^^^^^
|
| 2178 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches
|
| 2179 |
+
[rank0]: attention_mask = torch.ones(
|
| 2180 |
+
[rank0]: ^^^^^^^^^^^
|
| 2181 |
+
[rank0]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 18432.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 134.86 GiB is free. Including non-PyTorch memory, this process has 4.95 GiB memory in use. Of the allocated memory 3.30 GiB is allocated by PyTorch, and 193.54 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
|
| 2182 |
+
[rank6]: Traceback (most recent call last):
|
| 2183 |
+
[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
|
| 2184 |
+
[rank6]: pretrain(
|
| 2185 |
+
[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain
|
| 2186 |
+
[rank6]: iteration, num_floating_point_operations_so_far = train(
|
| 2187 |
+
[rank6]: ^^^^^^
|
| 2188 |
+
[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train
|
| 2189 |
+
[rank6]: ) = train_step(
|
| 2190 |
+
[rank6]: ^^^^^^^^^^^
|
| 2191 |
+
[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step
|
| 2192 |
+
[rank6]: losses_reduced = forward_backward_func(
|
| 2193 |
+
[rank6]: ^^^^^^^^^^^^^^^^^^^^^^
|
| 2194 |
+
[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining
|
| 2195 |
+
[rank6]: output_tensor, num_tokens = forward_step(
|
| 2196 |
+
[rank6]: ^^^^^^^^^^^^^
|
| 2197 |
+
[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step
|
| 2198 |
+
[rank6]: output_tensor, loss_func = forward_step_func(data_iterator, model)
|
| 2199 |
+
[rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 2200 |
+
[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step
|
| 2201 |
+
[rank6]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)
|
| 2202 |
+
[rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^
|
| 2203 |
+
[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch
|
| 2204 |
+
[rank6]: batch = next(global_batches)
|
| 2205 |
+
[rank6]: ^^^^^^^^^^^^^^^^^^^^
|
| 2206 |
+
[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches
|
| 2207 |
+
[rank6]: attention_mask = torch.ones(
|
| 2208 |
+
[rank6]: ^^^^^^^^^^^
|
| 2209 |
+
[rank6]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 18432.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 134.86 GiB is free. Including non-PyTorch memory, this process has 4.95 GiB memory in use. Of the allocated memory 3.30 GiB is allocated by PyTorch, and 193.54 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
|
| 2210 |
+
[rank7]: Traceback (most recent call last):
|
| 2211 |
+
[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
|
| 2212 |
+
[rank7]: pretrain(
|
| 2213 |
+
[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain
|
| 2214 |
+
[rank7]: iteration, num_floating_point_operations_so_far = train(
|
| 2215 |
+
[rank7]: ^^^^^^
|
| 2216 |
+
[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train
|
| 2217 |
+
[rank7]: ) = train_step(
|
| 2218 |
+
[rank7]: ^^^^^^^^^^^
|
| 2219 |
+
[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step
|
| 2220 |
+
[rank7]: losses_reduced = forward_backward_func(
|
| 2221 |
+
[rank7]: ^^^^^^^^^^^^^^^^^^^^^^
|
| 2222 |
+
[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining
|
| 2223 |
+
[rank7]: output_tensor, num_tokens = forward_step(
|
| 2224 |
+
[rank7]: ^^^^^^^^^^^^^
|
| 2225 |
+
[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step
|
| 2226 |
+
[rank7]: output_tensor, loss_func = forward_step_func(data_iterator, model)
|
| 2227 |
+
[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 2228 |
+
[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step
|
| 2229 |
+
[rank7]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)
|
| 2230 |
+
[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^
|
| 2231 |
+
[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch
|
| 2232 |
+
[rank7]: batch = next(global_batches)
|
| 2233 |
+
[rank7]: ^^^^^^^^^^^^^^^^^^^^
|
| 2234 |
+
[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches
|
| 2235 |
+
[rank7]: attention_mask = torch.ones(
|
| 2236 |
+
[rank7]: ^^^^^^^^^^^
|
| 2237 |
+
[rank7]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 18432.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 134.86 GiB is free. Including non-PyTorch memory, this process has 4.95 GiB memory in use. Of the allocated memory 3.30 GiB is allocated by PyTorch, and 193.54 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
|
| 2238 |
+
[rank1]:[W621 22:07:35.083637830 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
|
| 2239 |
+
[rank3]:[W621 22:07:35.101784035 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
|
| 2240 |
+
[rank7]:[W621 22:07:35.119439484 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
|
| 2241 |
+
[rank5]:[W621 22:07:35.149691968 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
|
| 2242 |
+
W0621 22:07:37.317000 2401561 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2401632 closing signal SIGTERM
|
| 2243 |
+
W0621 22:07:37.319000 2401561 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2401634 closing signal SIGTERM
|
| 2244 |
+
W0621 22:07:37.323000 2401561 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2401635 closing signal SIGTERM
|
| 2245 |
+
W0621 22:07:37.324000 2401561 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2401636 closing signal SIGTERM
|
| 2246 |
+
W0621 22:07:37.341000 2401561 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2401637 closing signal SIGTERM
|
| 2247 |
+
W0621 22:07:37.342000 2401561 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2401638 closing signal SIGTERM
|
| 2248 |
+
W0621 22:07:37.355000 2401561 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2401639 closing signal SIGTERM
|
| 2249 |
+
E0621 22:07:37.530000 2401561 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 1 (pid: 2401633) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
|
| 2250 |
+
Traceback (most recent call last):
|
| 2251 |
+
File "<frozen runpy>", line 198, in _run_module_as_main
|
| 2252 |
+
File "<frozen runpy>", line 88, in _run_code
|
| 2253 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in <module>
|
| 2254 |
+
main()
|
| 2255 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
|
| 2256 |
+
return arg(*args, **kwargs)
|
| 2257 |
+
^^^^^^^^^^^^^^^^^^^^
|
| 2258 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main
|
| 2259 |
+
launch(args)
|
| 2260 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch
|
| 2261 |
+
run(args)
|
| 2262 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run
|
| 2263 |
+
elastic_launch(
|
| 2264 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__
|
| 2265 |
+
return launch_agent(self._config, self._entrypoint, list(args))
|
| 2266 |
+
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 2267 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent
|
| 2268 |
+
raise ChildFailedError(
|
| 2269 |
+
torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
|
| 2270 |
+
============================================================
|
| 2271 |
+
./pretrain_gpt_profile.py FAILED
|
| 2272 |
+
------------------------------------------------------------
|
| 2273 |
+
Failures:
|
| 2274 |
+
<NO_OTHER_FAILURES>
|
| 2275 |
+
------------------------------------------------------------
|
| 2276 |
+
Root Cause (first observed failure):
|
| 2277 |
+
[0]:
|
| 2278 |
+
time : 2025-06-21_22:07:37
|
| 2279 |
+
host : fs-mbz-gpu-791
|
| 2280 |
+
rank : 1 (local_rank: 1)
|
| 2281 |
+
exitcode : 1 (pid: 2401633)
|
| 2282 |
+
error_file: <N/A>
|
| 2283 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
| 2284 |
+
============================================================
|
| 2285 |
+
+ set +x
|
| 2286 |
+
+ for ctx_length in 1024 2048 4096 8192 12288 16384 24576 32768 40960 49152 65536 81920 98304 131072
|
| 2287 |
+
+ export PROF_CTX_LENGTH=32768
|
| 2288 |
+
+ PROF_CTX_LENGTH=32768
|
| 2289 |
+
+ name='/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L32768*tp2.cp4.bs32.json'
|
| 2290 |
+
+ '[' -f '/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L32768*tp2.cp4.bs32.json' ']'
|
| 2291 |
+
+ echo 'Running ctx_length=32768, TP_SIZE=2, CP_SIZE=4, BATCH_SIZE=32'
|
| 2292 |
+
+ srun bash ./attnserver.sh
|
| 2293 |
+
+ which python3
|
| 2294 |
+
+ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 1 --node_rank 0 --rdzv_id 343248 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-791:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 2 --context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 32768 --max-position-embeddings 32768 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
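For reference, the parallel layout requested by this command splits the work roughly as follows; this is a back-of-the-envelope sketch assuming the usual Megatron-LM tensor-parallel head split and context-parallel sequence split (the variable names are ad hoc, not Megatron APIs):

# Illustrative arithmetic only; names are ad hoc, not Megatron-LM APIs.
TP, CP = 2, 4
SEQ_LEN = 32_768
NUM_HEADS, NUM_QUERY_GROUPS, HIDDEN = 64, 16, 4096

tokens_per_cp_rank = SEQ_LEN // CP            # 8192 sequence positions per CP rank
heads_per_tp_rank = NUM_HEADS // TP           # 32 query heads per TP rank
groups_per_tp_rank = NUM_QUERY_GROUPS // TP   # 8 KV groups per TP rank
head_dim = HIDDEN // NUM_HEADS                # 64
print(tokens_per_cp_rank, heads_per_tp_rank, groups_per_tp_rank, head_dim)

As the tracebacks that follow show, this run still fails in setup_batches: the dense attention mask appears to be allocated at full size before any of this sharding is applied, so increasing CP or TP does not shrink that particular allocation.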
|
| 2295 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated
|
| 2296 |
+
and will be removed in future. Use torchrun.
|
| 2297 |
+
Note that --use-env is set by default in torchrun.
|
| 2298 |
+
If your script expects `--local-rank` argument to be set, please
|
| 2299 |
+
change it to read from `os.environ['LOCAL_RANK']` instead. See
|
| 2300 |
+
https://pytorch.org/docs/stable/distributed.html#launch-utility for
|
| 2301 |
+
further instructions
|
| 2302 |
+
|
| 2303 |
+
main()
|
| 2304 |
+
W0621 22:07:41.521000 2403416 site-packages/torch/distributed/run.py:766]
|
| 2305 |
+
W0621 22:07:41.521000 2403416 site-packages/torch/distributed/run.py:766] *****************************************
|
| 2306 |
+
W0621 22:07:41.521000 2403416 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
|
| 2307 |
+
W0621 22:07:41.521000 2403416 site-packages/torch/distributed/run.py:766] *****************************************
|
| 2308 |
+
[rank6]:[W621 22:08:04.724807006 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 6] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 2309 |
+
[rank2]:[W621 22:08:04.724811060 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 2] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 2310 |
+
[rank4]:[W621 22:08:04.724828424 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 4] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 2311 |
+
[rank1]:[W621 22:08:04.725748608 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 1] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 2312 |
+
[rank5]:[W621 22:08:04.725887611 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 5] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 2313 |
+
[rank7]:[W621 22:08:04.726081401 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 7] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 2314 |
+
[rank3]:[W621 22:08:04.730565763 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 3] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 2315 |
+
[rank0]:[W621 22:08:04.851008941 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 0] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 2316 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 2317 |
+
warnings.warn(
|
| 2318 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 2319 |
+
warnings.warn(
|
| 2320 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 2321 |
+
warnings.warn(
|
| 2322 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 2323 |
+
warnings.warn(
|
| 2324 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 2325 |
+
warnings.warn(
|
| 2326 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 2327 |
+
warnings.warn(
|
| 2328 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 2329 |
+
warnings.warn(
|
| 2330 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 2331 |
+
warnings.warn(
|
| 2332 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 2333 |
+
warnings.warn(
|
| 2334 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 2335 |
+
warnings.warn(
|
| 2336 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 2337 |
+
warnings.warn(
|
| 2338 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 2339 |
+
warnings.warn(
|
| 2340 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 2341 |
+
warnings.warn(
|
| 2342 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 2343 |
+
warnings.warn(
|
| 2344 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 2345 |
+
warnings.warn(
|
| 2346 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 2347 |
+
warnings.warn(
|
| 2348 |
+
[rank3]: Traceback (most recent call last):
|
| 2349 |
+
[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
|
| 2350 |
+
[rank3]: pretrain(
|
| 2351 |
+
[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain
|
| 2352 |
+
[rank3]: iteration, num_floating_point_operations_so_far = train(
|
| 2353 |
+
[rank3]: ^^^^^^
|
| 2354 |
+
[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train
|
| 2355 |
+
[rank3]: ) = train_step(
|
| 2356 |
+
[rank3]: ^^^^^^^^^^^
|
| 2357 |
+
[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step
|
| 2358 |
+
[rank3]: losses_reduced = forward_backward_func(
|
| 2359 |
+
[rank3]: ^^^^^^^^^^^^^^^^^^^^^^
|
| 2360 |
+
[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining
|
| 2361 |
+
[rank3]: output_tensor, num_tokens = forward_step(
|
| 2362 |
+
[rank3]: ^^^^^^^^^^^^^
|
| 2363 |
+
[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step
|
| 2364 |
+
[rank3]: output_tensor, loss_func = forward_step_func(data_iterator, model)
|
| 2365 |
+
[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 2366 |
+
[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step
|
| 2367 |
+
[rank3]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)
|
| 2368 |
+
[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^
|
| 2369 |
+
[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch
|
| 2370 |
+
[rank3]: batch = next(global_batches)
|
| 2371 |
+
[rank3]: ^^^^^^^^^^^^^^^^^^^^
|
| 2372 |
+
[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches
|
| 2373 |
+
[rank3]: attention_mask = torch.ones(
|
| 2374 |
+
[rank3]: ^^^^^^^^^^^
|
| 2375 |
+
[rank3]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 32768.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 134.42 GiB is free. Including non-PyTorch memory, this process has 5.38 GiB memory in use. Of the allocated memory 3.71 GiB is allocated by PyTorch, and 225.54 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
|
| 2376 |
+
[rank4]: Traceback (most recent call last):
|
| 2377 |
+
[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
|
| 2378 |
+
[rank4]: pretrain(
|
| 2379 |
+
[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain
|
| 2380 |
+
[rank4]: iteration, num_floating_point_operations_so_far = train(
|
| 2381 |
+
[rank4]: ^^^^^^
|
| 2382 |
+
[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train
|
| 2383 |
+
[rank4]: ) = train_step(
|
| 2384 |
+
[rank4]: ^^^^^^^^^^^
|
| 2385 |
+
[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step
|
| 2386 |
+
[rank4]: losses_reduced = forward_backward_func(
|
| 2387 |
+
[rank4]: ^^^^^^^^^^^^^^^^^^^^^^
|
| 2388 |
+
[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining
|
| 2389 |
+
[rank4]: output_tensor, num_tokens = forward_step(
|
| 2390 |
+
[rank4]: ^^^^^^^^^^^^^
|
| 2391 |
+
[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step
|
| 2392 |
+
[rank4]: output_tensor, loss_func = forward_step_func(data_iterator, model)
|
| 2393 |
+
[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 2394 |
+
[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step
|
| 2395 |
+
[rank4]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)
|
| 2396 |
+
[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^
|
| 2397 |
+
[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch
|
| 2398 |
+
[rank4]: batch = next(global_batches)
|
| 2399 |
+
[rank4]: ^^^^^^^^^^^^^^^^^^^^
|
| 2400 |
+
[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches
|
| 2401 |
+
[rank4]: attention_mask = torch.ones(
|
| 2402 |
+
[rank4]: ^^^^^^^^^^^
|
| 2403 |
+
[rank4]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 32768.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 134.42 GiB is free. Including non-PyTorch memory, this process has 5.38 GiB memory in use. Of the allocated memory 3.71 GiB is allocated by PyTorch, and 225.54 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
|
| 2404 |
+
[rank5]: Traceback (most recent call last):
|
| 2405 |
+
[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
|
| 2406 |
+
[rank5]: pretrain(
|
| 2407 |
+
[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain
|
| 2408 |
+
[rank5]: iteration, num_floating_point_operations_so_far = train(
|
| 2409 |
+
[rank5]: ^^^^^^
|
| 2410 |
+
[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train
|
| 2411 |
+
[rank5]: ) = train_step(
|
| 2412 |
+
[rank5]: ^^^^^^^^^^^
|
| 2413 |
+
[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step
|
| 2414 |
+
[rank5]: losses_reduced = forward_backward_func(
|
| 2415 |
+
[rank5]: ^^^^^^^^^^^^^^^^^^^^^^
|
| 2416 |
+
[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining
|
| 2417 |
+
[rank5]: output_tensor, num_tokens = forward_step(
|
| 2418 |
+
[rank5]: ^^^^^^^^^^^^^
|
| 2419 |
+
[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step
|
| 2420 |
+
[rank5]: output_tensor, loss_func = forward_step_func(data_iterator, model)
|
| 2421 |
+
[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 2422 |
+
[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step
|
| 2423 |
+
[rank5]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)
|
| 2424 |
+
[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^
|
| 2425 |
+
[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch
|
| 2426 |
+
[rank5]: batch = next(global_batches)
|
| 2427 |
+
[rank5]: ^^^^^^^^^^^^^^^^^^^^
|
| 2428 |
+
[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches
|
| 2429 |
+
[rank5]: attention_mask = torch.ones(
|
| 2430 |
+
[rank5]: ^^^^^^^^^^^
|
| 2431 |
+
[rank5]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 32768.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 134.42 GiB is free. Including non-PyTorch memory, this process has 5.38 GiB memory in use. Of the allocated memory 3.71 GiB is allocated by PyTorch, and 225.54 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
|
| 2432 |
+
[rank0]: Traceback (most recent call last):
|
| 2433 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
|
| 2434 |
+
[rank0]: pretrain(
|
| 2435 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain
|
| 2436 |
+
[rank0]: iteration, num_floating_point_operations_so_far = train(
|
| 2437 |
+
[rank0]: ^^^^^^
|
| 2438 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train
|
| 2439 |
+
[rank0]: ) = train_step(
|
| 2440 |
+
[rank0]: ^^^^^^^^^^^
|
| 2441 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step
|
| 2442 |
+
[rank0]: losses_reduced = forward_backward_func(
|
| 2443 |
+
[rank0]: ^^^^^^^^^^^^^^^^^^^^^^
|
| 2444 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining
|
| 2445 |
+
[rank0]: output_tensor, num_tokens = forward_step(
|
| 2446 |
+
[rank0]: ^^^^^^^^^^^^^
|
| 2447 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step
|
| 2448 |
+
[rank0]: output_tensor, loss_func = forward_step_func(data_iterator, model)
|
| 2449 |
+
[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 2450 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step
|
| 2451 |
+
[rank0]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)
|
| 2452 |
+
[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^
|
| 2453 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch
|
| 2454 |
+
[rank0]: batch = next(global_batches)
|
| 2455 |
+
[rank0]: ^^^^^^^^^^^^^^^^^^^^
|
| 2456 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches
|
| 2457 |
+
[rank0]: attention_mask = torch.ones(
|
| 2458 |
+
[rank0]: ^^^^^^^^^^^
|
| 2459 |
+
[rank0]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 32768.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 134.42 GiB is free. Including non-PyTorch memory, this process has 5.38 GiB memory in use. Of the allocated memory 3.71 GiB is allocated by PyTorch, and 225.54 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
|
| 2460 |
+
[rank6]: Traceback (most recent call last):
|
| 2461 |
+
[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
|
| 2462 |
+
[rank6]: pretrain(
|
| 2463 |
+
[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain
|
| 2464 |
+
[rank6]: iteration, num_floating_point_operations_so_far = train(
|
| 2465 |
+
[rank6]: ^^^^^^
|
| 2466 |
+
[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train
|
| 2467 |
+
[rank6]: ) = train_step(
|
| 2468 |
+
[rank6]: ^^^^^^^^^^^
|
| 2469 |
+
[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step
|
| 2470 |
+
[rank6]: losses_reduced = forward_backward_func(
|
| 2471 |
+
[rank6]: ^^^^^^^^^^^^^^^^^^^^^^
|
| 2472 |
+
[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining
|
| 2473 |
+
[rank6]: output_tensor, num_tokens = forward_step(
|
| 2474 |
+
[rank6]: ^^^^^^^^^^^^^
|
| 2475 |
+
[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step
|
| 2476 |
+
[rank6]: output_tensor, loss_func = forward_step_func(data_iterator, model)
|
| 2477 |
+
[rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 2478 |
+
[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step
|
| 2479 |
+
[rank6]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)
|
| 2480 |
+
[rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^
|
| 2481 |
+
[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch
|
| 2482 |
+
[rank6]: batch = next(global_batches)
|
| 2483 |
+
[rank6]: ^^^^^^^^^^^^^^^^^^^^
|
| 2484 |
+
[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches
|
| 2485 |
+
[rank6]: attention_mask = torch.ones(
|
| 2486 |
+
[rank6]: ^^^^^^^^^^^
|
| 2487 |
+
[rank6]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 32768.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 134.42 GiB is free. Including non-PyTorch memory, this process has 5.38 GiB memory in use. Of the allocated memory 3.71 GiB is allocated by PyTorch, and 225.54 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
|
| 2488 |
+
[rank7]: Traceback (most recent call last):
|
| 2489 |
+
[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
|
| 2490 |
+
[rank7]: pretrain(
|
| 2491 |
+
[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain
|
| 2492 |
+
[rank7]: iteration, num_floating_point_operations_so_far = train(
|
| 2493 |
+
[rank7]: ^^^^^^
|
| 2494 |
+
[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train
|
| 2495 |
+
[rank7]: ) = train_step(
|
| 2496 |
+
[rank7]: ^^^^^^^^^^^
|
| 2497 |
+
[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step
|
| 2498 |
+
[rank7]: losses_reduced = forward_backward_func(
|
| 2499 |
+
[rank7]: ^^^^^^^^^^^^^^^^^^^^^^
|
| 2500 |
+
[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining
|
| 2501 |
+
[rank7]: output_tensor, num_tokens = forward_step(
|
| 2502 |
+
[rank7]: ^^^^^^^^^^^^^
|
| 2503 |
+
[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step
|
| 2504 |
+
[rank7]: output_tensor, loss_func = forward_step_func(data_iterator, model)
|
| 2505 |
+
[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 2506 |
+
[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step
|
| 2507 |
+
[rank7]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)
|
| 2508 |
+
[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^
|
| 2509 |
+
[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch
|
| 2510 |
+
[rank7]: batch = next(global_batches)
|
| 2511 |
+
[rank7]: ^^^^^^^^^^^^^^^^^^^^
|
| 2512 |
+
[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches
|
| 2513 |
+
[rank7]: attention_mask = torch.ones(
|
| 2514 |
+
[rank7]: ^^^^^^^^^^^
|
| 2515 |
+
[rank7]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 32768.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 134.42 GiB is free. Including non-PyTorch memory, this process has 5.38 GiB memory in use. Of the allocated memory 3.71 GiB is allocated by PyTorch, and 225.54 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
|
| 2516 |
+
[rank2]: Traceback (most recent call last):
|
| 2517 |
+
[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
|
| 2518 |
+
[rank2]: pretrain(
|
| 2519 |
+
[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain
|
| 2520 |
+
[rank2]: iteration, num_floating_point_operations_so_far = train(
|
| 2521 |
+
[rank2]: ^^^^^^
|
| 2522 |
+
[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train
|
| 2523 |
+
[rank2]: ) = train_step(
|
| 2524 |
+
[rank2]: ^^^^^^^^^^^
|
| 2525 |
+
[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step
|
| 2526 |
+
[rank2]: losses_reduced = forward_backward_func(
|
| 2527 |
+
[rank2]: ^^^^^^^^^^^^^^^^^^^^^^
|
| 2528 |
+
[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining
|
| 2529 |
+
[rank2]: output_tensor, num_tokens = forward_step(
|
| 2530 |
+
[rank2]: ^^^^^^^^^^^^^
|
| 2531 |
+
[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step
|
| 2532 |
+
[rank2]: output_tensor, loss_func = forward_step_func(data_iterator, model)
|
| 2533 |
+
[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 2534 |
+
[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step
|
| 2535 |
+
[rank2]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)
|
| 2536 |
+
[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^
|
| 2537 |
+
[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch
|
| 2538 |
+
[rank2]: batch = next(global_batches)
|
| 2539 |
+
[rank2]: ^^^^^^^^^^^^^^^^^^^^
|
| 2540 |
+
[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches
|
| 2541 |
+
[rank2]: attention_mask = torch.ones(
|
| 2542 |
+
[rank2]: ^^^^^^^^^^^
|
| 2543 |
+
[rank2]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 32768.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 134.42 GiB is free. Including non-PyTorch memory, this process has 5.38 GiB memory in use. Of the allocated memory 3.71 GiB is allocated by PyTorch, and 225.54 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
|
| 2544 |
+
[rank1]: Traceback (most recent call last):
|
| 2545 |
+
[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
|
| 2546 |
+
[rank1]: pretrain(
|
| 2547 |
+
[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain
|
| 2548 |
+
[rank1]: iteration, num_floating_point_operations_so_far = train(
|
| 2549 |
+
[rank1]: ^^^^^^
|
| 2550 |
+
[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train
|
| 2551 |
+
[rank1]: ) = train_step(
|
| 2552 |
+
[rank1]: ^^^^^^^^^^^
|
| 2553 |
+
[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step
|
| 2554 |
+
[rank1]: losses_reduced = forward_backward_func(
|
| 2555 |
+
[rank1]: ^^^^^^^^^^^^^^^^^^^^^^
|
| 2556 |
+
[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining
|
| 2557 |
+
[rank1]: output_tensor, num_tokens = forward_step(
|
| 2558 |
+
[rank1]: ^^^^^^^^^^^^^
|
| 2559 |
+
[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step
|
| 2560 |
+
[rank1]: output_tensor, loss_func = forward_step_func(data_iterator, model)
|
| 2561 |
+
[rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 2562 |
+
[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step
|
| 2563 |
+
[rank1]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)
|
| 2564 |
+
[rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^
|
| 2565 |
+
[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch
|
| 2566 |
+
[rank1]: batch = next(global_batches)
|
| 2567 |
+
[rank1]: ^^^^^^^^^^^^^^^^^^^^
|
| 2568 |
+
[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches
|
| 2569 |
+
[rank1]: attention_mask = torch.ones(
|
| 2570 |
+
[rank1]: ^^^^^^^^^^^
|
| 2571 |
+
[rank1]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 32768.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 134.42 GiB is free. Including non-PyTorch memory, this process has 5.38 GiB memory in use. Of the allocated memory 3.71 GiB is allocated by PyTorch, and 225.54 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
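Editor's note: all four ranks above fail on the same allocation. setup_batches materializes a dense attention mask with torch.ones, whose memory grows quadratically with context length. Below is a minimal sketch of the size arithmetic, assuming a [batch, 1, seq, seq] float32 mask as suggested by the batch tensor lines in this log; the exact shape and dtype inside setup_batches are not shown here.

import torch

def dense_mask_bytes(batch, seq_len, dtype=torch.float32):
    # Size of a [batch, 1, seq_len, seq_len] mask such as the one built in setup_batches.
    # The dtype is an assumption: torch.ones defaults to float32 unless a dtype is passed.
    return batch * seq_len * seq_len * torch.finfo(dtype).bits // 8

# A single 131072-token sample already needs ~64 GiB for a dense float32 mask.
print(dense_mask_bytes(1, 131072) / 2**30, "GiB")

# The allocator hint in the error message only helps with fragmentation, not with a mask
# of this size; if used, it must be set before the first CUDA allocation:
#   PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True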
|
| 2572 |
+
[rank5]:[W621 22:08:15.174747082 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
|
| 2573 |
+
[rank1]:[W621 22:08:15.215304489 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
|
| 2574 |
+
[rank7]:[W621 22:08:15.215349183 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
|
| 2575 |
+
[rank3]:[W621 22:08:15.235729396 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
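Editor's note: the destroy_process_group() warnings above are benign here, since the job is already failing, but the clean shutdown the message asks for is a one-liner. A minimal sketch, assuming the usual init/teardown pattern rather than Megatron's actual exit path:

import torch.distributed as dist

def main():
    dist.init_process_group(backend="nccl")
    try:
        ...  # training / profiling work
    finally:
        if dist.is_initialized():
            dist.destroy_process_group()  # explicit teardown avoids the NCCL leak warning at exit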
|
| 2576 |
+
W0621 22:08:17.165000 2403416 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2403487 closing signal SIGTERM
|
| 2577 |
+
W0621 22:08:17.169000 2403416 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2403488 closing signal SIGTERM
|
| 2578 |
+
W0621 22:08:17.169000 2403416 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2403489 closing signal SIGTERM
|
| 2579 |
+
W0621 22:08:17.171000 2403416 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2403490 closing signal SIGTERM
|
| 2580 |
+
W0621 22:08:17.171000 2403416 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2403491 closing signal SIGTERM
|
| 2581 |
+
W0621 22:08:17.190000 2403416 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2403493 closing signal SIGTERM
|
| 2582 |
+
W0621 22:08:17.193000 2403416 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2403494 closing signal SIGTERM
|
| 2583 |
+
E0621 22:08:17.736000 2403416 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 5 (pid: 2403492) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
|
| 2584 |
+
Traceback (most recent call last):
|
| 2585 |
+
File "<frozen runpy>", line 198, in _run_module_as_main
|
| 2586 |
+
File "<frozen runpy>", line 88, in _run_code
|
| 2587 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in <module>
|
| 2588 |
+
main()
|
| 2589 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
|
| 2590 |
+
return arg(*args, **kwargs)
|
| 2591 |
+
^^^^^^^^^^^^^^^^^^^^
|
| 2592 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main
|
| 2593 |
+
launch(args)
|
| 2594 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch
|
| 2595 |
+
run(args)
|
| 2596 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run
|
| 2597 |
+
elastic_launch(
|
| 2598 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__
|
| 2599 |
+
return launch_agent(self._config, self._entrypoint, list(args))
|
| 2600 |
+
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 2601 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent
|
| 2602 |
+
raise ChildFailedError(
|
| 2603 |
+
torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
|
| 2604 |
+
============================================================
|
| 2605 |
+
./pretrain_gpt_profile.py FAILED
|
| 2606 |
+
------------------------------------------------------------
|
| 2607 |
+
Failures:
|
| 2608 |
+
<NO_OTHER_FAILURES>
|
| 2609 |
+
------------------------------------------------------------
|
| 2610 |
+
Root Cause (first observed failure):
|
| 2611 |
+
[0]:
|
| 2612 |
+
time : 2025-06-21_22:08:17
|
| 2613 |
+
host : fs-mbz-gpu-791
|
| 2614 |
+
rank : 5 (local_rank: 5)
|
| 2615 |
+
exitcode : 1 (pid: 2403492)
|
| 2616 |
+
error_file: <N/A>
|
| 2617 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
| 2618 |
+
============================================================
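Editor's note: the elastic failure report above only records the exit code. The page linked under "traceback" documents wrapping the entrypoint with the @record decorator so the worker's Python traceback is written to an error file. A minimal sketch; the surrounding code is illustrative, not the project's actual entrypoint:

from torch.distributed.elastic.multiprocessing.errors import record

@record            # writes the worker's Python traceback to an error file on failure
def main():
    ...            # pretrain(...) would be called here

if __name__ == "__main__":
    main()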
|
| 2619 |
+
+ set +x
|
| 2620 |
+
+ for ctx_length in 1024 2048 4096 8192 12288 16384 24576 32768 40960 49152 65536 81920 98304 131072
|
| 2621 |
+
+ export PROF_CTX_LENGTH=40960
|
| 2622 |
+
+ PROF_CTX_LENGTH=40960
|
| 2623 |
+
+ name='/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L40960*tp2.cp4.bs32.json'
|
| 2624 |
+
+ '[' -f '/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L40960*tp2.cp4.bs32.json' ']'
|
| 2625 |
+
+ echo 'Running ctx_length=40960, TP_SIZE=2, CP_SIZE=4, BATCH_SIZE=32'
|
| 2626 |
+
+ srun bash ./attnserver.sh
|
| 2627 |
+
+ which python3
|
| 2628 |
+
+ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 1 --node_rank 0 --rdzv_id 343248 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-791:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 2 --context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 40960 --max-position-embeddings 40960 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
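Editor's note: the shell trace above is one step of a sweep over context lengths that skips configurations whose trace file already exists. A minimal Python sketch of the same guard; paths, PROF_* names, and parallelism sizes are copied from the trace, and note that the bash test quotes the '*', so it checks a literal filename, whereas the sketch below expands it as a glob:

import glob
import os
import subprocess

TRACE_DIR = "/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5"

def run_sweep(ctx_lengths, tp=2, cp=4, bs=32):
    for length in ctx_lengths:
        pattern = os.path.join(TRACE_DIR, f"mytrace.L{length}*tp{tp}.cp{cp}.bs{bs}.json")
        if glob.glob(pattern):  # a trace for this config already exists, so skip it
            continue
        env = dict(os.environ, PROF_CTX_LENGTH=str(length))
        subprocess.run(["srun", "bash", "./attnserver.sh"], env=env, check=False)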
|
| 2629 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated
|
| 2630 |
+
and will be removed in future. Use torchrun.
|
| 2631 |
+
Note that --use-env is set by default in torchrun.
|
| 2632 |
+
If your script expects `--local-rank` argument to be set, please
|
| 2633 |
+
change it to read from `os.environ['LOCAL_RANK']` instead. See
|
| 2634 |
+
https://pytorch.org/docs/stable/distributed.html#launch-utility for
|
| 2635 |
+
further instructions
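Editor's note: the deprecation notice above amounts to one change on the script side, namely dropping the --local-rank argument and reading the rank torchrun already exports. A minimal sketch:

import os
import torch

local_rank = int(os.environ["LOCAL_RANK"])  # exported by torchrun (and by launch.py with --use-env)
torch.cuda.set_device(local_rank)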
|
| 2636 |
+
|
| 2637 |
+
main()
|
| 2638 |
+
W0621 22:08:22.025000 2405252 site-packages/torch/distributed/run.py:766]
|
| 2639 |
+
W0621 22:08:22.025000 2405252 site-packages/torch/distributed/run.py:766] *****************************************
|
| 2640 |
+
W0621 22:08:22.025000 2405252 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
|
| 2641 |
+
W0621 22:08:22.025000 2405252 site-packages/torch/distributed/run.py:766] *****************************************
|
| 2642 |
+
[rank5]:[W621 22:08:44.405819851 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 5] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 2643 |
+
[rank3]:[W621 22:08:44.405819770 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 3] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 2644 |
+
[rank7]:[W621 22:08:44.405833491 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 7] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 2645 |
+
[rank1]:[W621 22:08:44.407927906 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 1] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 2646 |
+
[rank4]:[W621 22:08:44.411396081 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 4] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 2647 |
+
[rank6]:[W621 22:08:44.411456484 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 6] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 2648 |
+
[rank2]:[W621 22:08:44.413885260 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 2] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 2649 |
+
[rank0]:[W621 22:08:44.551861768 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 0] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
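Editor's note: the warnings above appear on every rank because the process group is initialized before a CUDA device is bound to the process. Binding the device first, or passing device_id as the message suggests, silences them. The sketch below is an illustration, not Megatron's actual initialization code, and device_id requires a reasonably recent PyTorch:

import os
import torch
import torch.distributed as dist

local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)  # bind the GPU before creating the NCCL process group
dist.init_process_group(
    backend="nccl",
    device_id=torch.device(f"cuda:{local_rank}"),
)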
|
attnserver.run_attnserver.slurm.sh.343248.out.log
CHANGED
|
The diff for this file is too large to render.
See raw diff
|
attnserver.run_attnserver.slurm.sh.343261.err.log
CHANGED
|
@@ -200,3 +200,146 @@ W0621 22:06:13.082000 2070539 site-packages/torch/distributed/run.py:766] ******
|
|
| 200 |
[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/serialization.py", line 792, in __init__
|
| 201 |
[rank0]: torch._C.PyTorchFileWriter(
|
| 202 |
[rank0]: RuntimeError: Parent directory gpt-checkpoint/iter_0000010 does not exist.
|
| 203 |
+
[rank0]:[W621 22:07:31.451041648 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
|
| 204 |
+
W0621 22:07:35.198000 2070539 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2070611 closing signal SIGTERM
|
| 205 |
+
W0621 22:07:35.202000 2070539 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2070612 closing signal SIGTERM
|
| 206 |
+
W0621 22:07:35.205000 2070539 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2070613 closing signal SIGTERM
|
| 207 |
+
W0621 22:07:35.208000 2070539 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2070614 closing signal SIGTERM
|
| 208 |
+
W0621 22:07:35.228000 2070539 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2070615 closing signal SIGTERM
|
| 209 |
+
W0621 22:07:35.233000 2070539 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2070616 closing signal SIGTERM
|
| 210 |
+
W0621 22:07:35.235000 2070539 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2070617 closing signal SIGTERM
|
| 211 |
+
E0621 22:07:36.786000 2070539 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 0 (pid: 2070610) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
|
| 212 |
+
Traceback (most recent call last):
|
| 213 |
+
File "<frozen runpy>", line 198, in _run_module_as_main
|
| 214 |
+
File "<frozen runpy>", line 88, in _run_code
|
| 215 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in <module>
|
| 216 |
+
main()
|
| 217 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
|
| 218 |
+
return arg(*args, **kwargs)
|
| 219 |
+
^^^^^^^^^^^^^^^^^^^^
|
| 220 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main
|
| 221 |
+
launch(args)
|
| 222 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch
|
| 223 |
+
run(args)
|
| 224 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run
|
| 225 |
+
elastic_launch(
|
| 226 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__
|
| 227 |
+
return launch_agent(self._config, self._entrypoint, list(args))
|
| 228 |
+
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 229 |
+
File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent
|
| 230 |
+
raise ChildFailedError(
|
| 231 |
+
torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
|
| 232 |
+
============================================================
|
| 233 |
+
./pretrain_gpt_profile.py FAILED
|
| 234 |
+
------------------------------------------------------------
|
| 235 |
+
Failures:
|
| 236 |
+
<NO_OTHER_FAILURES>
|
| 237 |
+
------------------------------------------------------------
|
| 238 |
+
Root Cause (first observed failure):
|
| 239 |
+
[0]:
|
| 240 |
+
time : 2025-06-21_22:07:35
|
| 241 |
+
host : fs-mbz-gpu-830
|
| 242 |
+
rank : 0 (local_rank: 0)
|
| 243 |
+
exitcode : 1 (pid: 2070610)
|
| 244 |
+
error_file: <N/A>
|
| 245 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
| 246 |
+
============================================================
|
| 247 |
+
+ set +x
|
| 248 |
+
+ for ctx_length in 1024 2048 4096 8192 12288 16384 24576 32768 40960 49152 65536 81920 98304 131072
|
| 249 |
+
+ export PROF_CTX_LENGTH=2048
|
| 250 |
+
+ PROF_CTX_LENGTH=2048
|
| 251 |
+
+ name='/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L2048*tp1.cp8.bs1.json'
|
| 252 |
+
+ '[' -f '/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L2048*tp1.cp8.bs1.json' ']'
|
| 253 |
+
+ echo 'Running ctx_length=2048, TP_SIZE=1, CP_SIZE=8, BATCH_SIZE=1'
|
| 254 |
+
+ srun bash ./attnserver.sh
|
| 255 |
+
+ which python3
|
| 256 |
+
+ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 1 --node_rank 0 --rdzv_id 343261 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-830:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 1 --context-parallel-size 8 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 2048 --max-position-embeddings 2048 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
|
| 257 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated
|
| 258 |
+
and will be removed in future. Use torchrun.
|
| 259 |
+
Note that --use-env is set by default in torchrun.
|
| 260 |
+
If your script expects `--local-rank` argument to be set, please
|
| 261 |
+
change it to read from `os.environ['LOCAL_RANK']` instead. See
|
| 262 |
+
https://pytorch.org/docs/stable/distributed.html#launch-utility for
|
| 263 |
+
further instructions
|
| 264 |
+
|
| 265 |
+
main()
|
| 266 |
+
W0621 22:07:40.187000 2073806 site-packages/torch/distributed/run.py:766]
|
| 267 |
+
W0621 22:07:40.187000 2073806 site-packages/torch/distributed/run.py:766] *****************************************
|
| 268 |
+
W0621 22:07:40.187000 2073806 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
|
| 269 |
+
W0621 22:07:40.187000 2073806 site-packages/torch/distributed/run.py:766] *****************************************
|
| 270 |
+
[rank5]:[W621 22:08:01.536122863 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 5] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 271 |
+
[rank2]:[W621 22:08:01.536729225 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 2] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 272 |
+
[rank6]:[W621 22:08:01.537138778 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 6] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 273 |
+
[rank7]:[W621 22:08:01.537688344 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 7] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 274 |
+
[rank4]:[W621 22:08:01.538826954 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 4] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 275 |
+
[rank3]:[W621 22:08:01.539080415 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 3] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 276 |
+
[rank1]:[W621 22:08:01.544484319 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 1] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 277 |
+
[rank0]:[W621 22:08:02.687400636 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 0] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 278 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 279 |
+
warnings.warn(
|
| 280 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 281 |
+
warnings.warn(
|
| 282 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 283 |
+
warnings.warn(
|
| 284 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 285 |
+
warnings.warn(
|
| 286 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 287 |
+
warnings.warn(
|
| 288 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 289 |
+
warnings.warn(
|
| 290 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 291 |
+
warnings.warn(
|
| 292 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 293 |
+
warnings.warn(
|
| 294 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 295 |
+
warnings.warn(
|
| 296 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 297 |
+
warnings.warn(
|
| 298 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 299 |
+
warnings.warn(
|
| 300 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 301 |
+
warnings.warn(
|
| 302 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 303 |
+
warnings.warn(
|
| 304 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 305 |
+
warnings.warn(
|
| 306 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 307 |
+
warnings.warn(
|
| 308 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 309 |
+
warnings.warn(
|
| 310 |
+
[rank0]: Traceback (most recent call last):
|
| 311 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
|
| 312 |
+
[rank0]: pretrain(
|
| 313 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 879, in pretrain
|
| 314 |
+
[rank0]: save_checkpoint(
|
| 315 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 469, in save_checkpoint
|
| 316 |
+
[rank0]: async_save_request = dist_checkpointing.save(state_dict, checkpoint_name, save_strategy,
|
| 317 |
+
[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 318 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 404, in save
|
| 319 |
+
[rank0]: sharded_strategy.save(sharded_state_dict, checkpoint_dir)
|
| 320 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/fully_parallel.py", line 95, in save
|
| 321 |
+
[rank0]: return self.base_strategy.save(sharded_state_dict, checkpoint_dir)
|
| 322 |
+
[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 323 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/base.py", line 228, in save
|
| 324 |
+
[rank0]: async_calls.maybe_finalize_async_calls(blocking=True)
|
| 325 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/async_utils.py", line 545, in maybe_finalize_async_calls
|
| 326 |
+
[rank0]: finalize_fn()
|
| 327 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 800, in finalize_fn
|
| 328 |
+
[rank0]: save_state_dict_async_finalize(*save_state_dict_ret)
|
| 329 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/state_dict_saver.py", line 243, in save_state_dict_async_finalize
|
| 330 |
+
[rank0]: storage_writer.finish(global_metadata, all_results)
|
| 331 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/filesystem_async.py", line 483, in finish
|
| 332 |
+
[rank0]: super().finish(metadata, results)
|
| 333 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/filesystem.py", line 697, in finish
|
| 334 |
+
[rank0]: with self.fs.create_stream(tmp_path, "wb") as metadata_file:
|
| 335 |
+
[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 336 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/contextlib.py", line 137, in __enter__
|
| 337 |
+
[rank0]: return next(self.gen)
|
| 338 |
+
[rank0]: ^^^^^^^^^^^^^^
|
| 339 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/filesystem.py", line 476, in create_stream
|
| 340 |
+
[rank0]: with path.open(mode) as stream:
|
| 341 |
+
[rank0]: ^^^^^^^^^^^^^^^
|
| 342 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/pathlib.py", line 1013, in open
|
| 343 |
+
[rank0]: return io.open(self, mode, buffering, encoding, errors, newline)
|
| 344 |
+
[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 345 |
+
[rank0]: FileNotFoundError: [Errno 2] No such file or directory: 'gpt-checkpoint/iter_0000010/.metadata.tmp'
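Editor's note: both checkpoint failures in this file, the missing parent directory gpt-checkpoint/iter_0000010 above and the missing .metadata.tmp here, reduce to that iteration directory not existing when rank 0 finalizes the save. A minimal defensive sketch, assuming the --save path from the launch command; the real cause may be a shared-filesystem race rather than missing code, so treat this as an illustration only:

import os

def ensure_checkpoint_dir(save_root="gpt-checkpoint", iteration=10):
    # Megatron names iteration directories iter_XXXXXXX; pre-creating the one for this step
    # gives PyTorchFileWriter and the .metadata.tmp stream a parent directory to write into.
    ckpt_dir = os.path.join(save_root, f"iter_{iteration:07d}")
    os.makedirs(ckpt_dir, exist_ok=True)
    return ckpt_dir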
|
attnserver.run_attnserver.slurm.sh.343261.out.log
CHANGED
|
The diff for this file is too large to render.
See raw diff
|
attnserver.run_attnserver.slurm.sh.343262.err.log
ADDED
|
@@ -0,0 +1,217 @@
|
| 1 |
+
+ source /mnt/weka/home/hao.zhang/conda/miniconda/bin/activate
|
| 2 |
+
++ _CONDA_ROOT=/mnt/weka/home/hao.zhang/conda/miniconda
|
| 3 |
+
++ . /mnt/weka/home/hao.zhang/conda/miniconda/etc/profile.d/conda.sh
|
| 4 |
+
+++ export CONDA_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/conda
|
| 5 |
+
+++ CONDA_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/conda
|
| 6 |
+
+++ export _CE_M=
|
| 7 |
+
+++ _CE_M=
|
| 8 |
+
+++ export _CE_CONDA=
|
| 9 |
+
+++ _CE_CONDA=
|
| 10 |
+
+++ export CONDA_PYTHON_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/python
|
| 11 |
+
+++ CONDA_PYTHON_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/python
|
| 12 |
+
+++ '[' -z x ']'
|
| 13 |
+
++ conda activate
|
| 14 |
+
++ local cmd=activate
|
| 15 |
+
++ case "$cmd" in
|
| 16 |
+
++ __conda_activate activate
|
| 17 |
+
++ '[' -n '' ']'
|
| 18 |
+
++ local ask_conda
|
| 19 |
+
+++ PS1=
|
| 20 |
+
+++ __conda_exe shell.posix activate
|
| 21 |
+
+++ '[' -n '' ']'
|
| 22 |
+
+++ /mnt/weka/home/hao.zhang/conda/miniconda/bin/conda shell.posix activate
|
| 23 |
+
++ ask_conda='unset _CE_M
|
| 24 |
+
unset _CE_CONDA
|
| 25 |
+
PS1='\''(base) '\''
|
| 26 |
+
export PATH='\''/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/conda/miniconda/bin:/mnt/weka/home/hao.zhang/conda/miniconda/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin'\''
|
| 27 |
+
export CONDA_SHLVL='\''1'\''
|
| 28 |
+
export CONDA_PROMPT_MODIFIER='\''(base) '\''
|
| 29 |
+
export CONDA_EXE='\''/mnt/weka/home/hao.zhang/conda/miniconda/bin/conda'\''
|
| 30 |
+
export CONDA_PYTHON_EXE='\''/mnt/weka/home/hao.zhang/conda/miniconda/bin/python'\'''
|
| 31 |
+
++ eval 'unset _CE_M
|
| 32 |
+
unset _CE_CONDA
|
| 33 |
+
PS1='\''(base) '\''
|
| 34 |
+
export PATH='\''/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/conda/miniconda/bin:/mnt/weka/home/hao.zhang/conda/miniconda/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin'\''
|
| 35 |
+
export CONDA_SHLVL='\''1'\''
|
| 36 |
+
export CONDA_PROMPT_MODIFIER='\''(base) '\''
|
| 37 |
+
export CONDA_EXE='\''/mnt/weka/home/hao.zhang/conda/miniconda/bin/conda'\''
|
| 38 |
+
export CONDA_PYTHON_EXE='\''/mnt/weka/home/hao.zhang/conda/miniconda/bin/python'\'''
|
| 39 |
+
+++ unset _CE_M
|
| 40 |
+
+++ unset _CE_CONDA
|
| 41 |
+
+++ PS1='(base) '
|
| 42 |
+
+++ export PATH=/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/conda/miniconda/bin:/mnt/weka/home/hao.zhang/conda/miniconda/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin
|
| 43 |
+
+++ PATH=/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/conda/miniconda/bin:/mnt/weka/home/hao.zhang/conda/miniconda/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin
|
| 44 |
+
+++ export CONDA_SHLVL=1
|
| 45 |
+
+++ CONDA_SHLVL=1
|
| 46 |
+
+++ export 'CONDA_PROMPT_MODIFIER=(base) '
|
| 47 |
+
+++ CONDA_PROMPT_MODIFIER='(base) '
|
| 48 |
+
+++ export CONDA_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/conda
|
| 49 |
+
+++ CONDA_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/conda
|
| 50 |
+
+++ export CONDA_PYTHON_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/python
|
| 51 |
+
+++ CONDA_PYTHON_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/python
|
| 52 |
+
++ __conda_hashr
|
| 53 |
+
++ '[' -n '' ']'
|
| 54 |
+
++ '[' -n '' ']'
|
| 55 |
+
++ hash -r
|
| 56 |
+
+ conda activate junda-attnserver
|
| 57 |
+
+ local cmd=activate
|
| 58 |
+
+ case "$cmd" in
|
| 59 |
+
+ __conda_activate activate junda-attnserver
|
| 60 |
+
+ '[' -n '' ']'
|
| 61 |
+
+ local ask_conda
|
| 62 |
+
++ PS1='(base) '
|
| 63 |
+
++ __conda_exe shell.posix activate junda-attnserver
|
| 64 |
+
++ '[' -n '' ']'
|
| 65 |
+
++ /mnt/weka/home/hao.zhang/conda/miniconda/bin/conda shell.posix activate junda-attnserver
|
| 66 |
+
+ ask_conda='unset _CE_M
|
| 67 |
+
unset _CE_CONDA
|
| 68 |
+
PS1='\''(junda-attnserver) '\''
|
| 69 |
+
export PATH='\''/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin:/mnt/weka/home/hao.zhang/conda/miniconda/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin'\''
|
| 70 |
+
export CONDA_PREFIX='\''/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver'\''
|
| 71 |
+
export CONDA_SHLVL='\''2'\''
|
| 72 |
+
export CONDA_DEFAULT_ENV='\''junda-attnserver'\''
|
| 73 |
+
export CONDA_PROMPT_MODIFIER='\''(junda-attnserver) '\''
|
| 74 |
+
export CONDA_PREFIX_1='\''/mnt/weka/home/hao.zhang/conda/miniconda'\''
|
| 75 |
+
export CONDA_EXE='\''/mnt/weka/home/hao.zhang/conda/miniconda/bin/conda'\''
|
| 76 |
+
export CONDA_PYTHON_EXE='\''/mnt/weka/home/hao.zhang/conda/miniconda/bin/python'\'''
|
| 77 |
+
+ eval 'unset _CE_M
|
| 78 |
+
unset _CE_CONDA
|
| 79 |
+
PS1='\''(junda-attnserver) '\''
|
| 80 |
+
export PATH='\''/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin:/mnt/weka/home/hao.zhang/conda/miniconda/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin'\''
|
| 81 |
+
export CONDA_PREFIX='\''/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver'\''
|
| 82 |
+
export CONDA_SHLVL='\''2'\''
|
| 83 |
+
export CONDA_DEFAULT_ENV='\''junda-attnserver'\''
|
| 84 |
+
export CONDA_PROMPT_MODIFIER='\''(junda-attnserver) '\''
|
| 85 |
+
export CONDA_PREFIX_1='\''/mnt/weka/home/hao.zhang/conda/miniconda'\''
|
| 86 |
+
export CONDA_EXE='\''/mnt/weka/home/hao.zhang/conda/miniconda/bin/conda'\''
|
| 87 |
+
export CONDA_PYTHON_EXE='\''/mnt/weka/home/hao.zhang/conda/miniconda/bin/python'\'''
|
| 88 |
+
++ unset _CE_M
|
| 89 |
+
++ unset _CE_CONDA
|
| 90 |
+
++ PS1='(junda-attnserver) '
|
| 91 |
+
++ export PATH=/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin:/mnt/weka/home/hao.zhang/conda/miniconda/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin
|
| 92 |
+
++ PATH=/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/.local/bin:/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin:/mnt/weka/home/hao.zhang/conda/miniconda/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin
|
| 93 |
+
++ export CONDA_PREFIX=/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver
|
| 94 |
+
++ CONDA_PREFIX=/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver
|
| 95 |
+
++ export CONDA_SHLVL=2
|
| 96 |
+
++ CONDA_SHLVL=2
|
| 97 |
+
++ export CONDA_DEFAULT_ENV=junda-attnserver
|
| 98 |
+
++ CONDA_DEFAULT_ENV=junda-attnserver
|
| 99 |
+
++ export 'CONDA_PROMPT_MODIFIER=(junda-attnserver) '
|
| 100 |
+
++ CONDA_PROMPT_MODIFIER='(junda-attnserver) '
|
| 101 |
+
++ export CONDA_PREFIX_1=/mnt/weka/home/hao.zhang/conda/miniconda
|
| 102 |
+
++ CONDA_PREFIX_1=/mnt/weka/home/hao.zhang/conda/miniconda
|
| 103 |
+
++ export CONDA_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/conda
|
| 104 |
+
++ CONDA_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/conda
|
| 105 |
+
++ export CONDA_PYTHON_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/python
|
| 106 |
+
++ CONDA_PYTHON_EXE=/mnt/weka/home/hao.zhang/conda/miniconda/bin/python
|
| 107 |
+
+ __conda_hashr
|
| 108 |
+
+ '[' -n '' ']'
|
| 109 |
+
+ '[' -n '' ']'
|
| 110 |
+
+ hash -r
|
| 111 |
+
+ export CHROME_TRACE_PREFIX=/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5
|
| 112 |
+
+ CHROME_TRACE_PREFIX=/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5
|
| 113 |
+
+ mkdir -p /mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5
|
| 114 |
+
+ export PROF_TP_SIZE=1
|
| 115 |
+
+ PROF_TP_SIZE=1
|
| 116 |
+
+ export PROF_CP_SIZE=8
|
| 117 |
+
+ PROF_CP_SIZE=8
|
| 118 |
+
+ export PROF_BS=2
|
| 119 |
+
+ PROF_BS=2
|
| 120 |
+
+ for ctx_length in 1024 2048 4096 8192 12288 16384 24576 32768 40960 49152 65536 81920 98304 131072
|
| 121 |
+
+ export PROF_CTX_LENGTH=1024
|
| 122 |
+
+ PROF_CTX_LENGTH=1024
|
| 123 |
+
+ name='/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L1024*tp1.cp8.bs2.json'
|
| 124 |
+
+ '[' -f '/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L1024*tp1.cp8.bs2.json' ']'
|
| 125 |
+
+ echo 'Running ctx_length=1024, TP_SIZE=1, CP_SIZE=8, BATCH_SIZE=2'
|
| 126 |
+
+ srun bash ./attnserver.sh
|
| 127 |
+
+ which python3
|
| 128 |
+
+ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 1 --node_rank 0 --rdzv_id 343262 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-570:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 1 --context-parallel-size 8 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 1024 --max-position-embeddings 1024 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
|
| 129 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated
|
| 130 |
+
and will be removed in future. Use torchrun.
|
| 131 |
+
Note that --use-env is set by default in torchrun.
|
| 132 |
+
If your script expects `--local-rank` argument to be set, please
|
| 133 |
+
change it to read from `os.environ['LOCAL_RANK']` instead. See
|
| 134 |
+
https://pytorch.org/docs/stable/distributed.html#launch-utility for
|
| 135 |
+
further instructions
|
| 136 |
+
|
| 137 |
+
main()
|
| 138 |
+
W0621 22:07:39.919000 3216603 site-packages/torch/distributed/run.py:766]
|
| 139 |
+
W0621 22:07:39.919000 3216603 site-packages/torch/distributed/run.py:766] *****************************************
|
| 140 |
+
W0621 22:07:39.919000 3216603 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
|
| 141 |
+
W0621 22:07:39.919000 3216603 site-packages/torch/distributed/run.py:766] *****************************************
|
| 142 |
+
[rank7]:[W621 22:08:01.735665882 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 7] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 143 |
+
[rank1]:[W621 22:08:01.735666035 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 1] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 144 |
+
[rank6]:[W621 22:08:01.735714247 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 6] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 145 |
+
[rank4]:[W621 22:08:01.735803924 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 4] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 146 |
+
[rank2]:[W621 22:08:01.736885617 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 2] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 147 |
+
[rank3]:[W621 22:08:01.741263660 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 3] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 148 |
+
[rank5]:[W621 22:08:01.741595043 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 5] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 149 |
+
[rank0]:[W621 22:08:01.923892828 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 0] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
|
| 150 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 151 |
+
warnings.warn(
|
| 152 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 153 |
+
warnings.warn(
|
| 154 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 155 |
+
warnings.warn(
|
| 156 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 157 |
+
warnings.warn(
|
| 158 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 159 |
+
warnings.warn(
|
| 160 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 161 |
+
warnings.warn(
|
| 162 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 163 |
+
warnings.warn(
|
| 164 |
+
/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
|
| 165 |
+
warnings.warn(
|
| 166 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 167 |
+
warnings.warn(
|
| 168 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 169 |
+
warnings.warn(
|
| 170 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 171 |
+
warnings.warn(
|
| 172 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 173 |
+
warnings.warn(
|
| 174 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 175 |
+
warnings.warn(
|
| 176 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 177 |
+
warnings.warn(
|
| 178 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 179 |
+
warnings.warn(
|
| 180 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 181 |
+
warnings.warn(
|
| 182 |
+
[rank0]: Traceback (most recent call last):
|
| 183 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
|
| 184 |
+
[rank0]: pretrain(
|
| 185 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 879, in pretrain
|
| 186 |
+
[rank0]: save_checkpoint(
|
| 187 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 469, in save_checkpoint
|
| 188 |
+
[rank0]: async_save_request = dist_checkpointing.save(state_dict, checkpoint_name, save_strategy,
|
| 189 |
+
[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 190 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 404, in save
|
| 191 |
+
[rank0]: sharded_strategy.save(sharded_state_dict, checkpoint_dir)
|
| 192 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/fully_parallel.py", line 95, in save
|
| 193 |
+
[rank0]: return self.base_strategy.save(sharded_state_dict, checkpoint_dir)
|
| 194 |
+
[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 195 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/base.py", line 228, in save
|
| 196 |
+
[rank0]: async_calls.maybe_finalize_async_calls(blocking=True)
|
| 197 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/async_utils.py", line 545, in maybe_finalize_async_calls
|
| 198 |
+
[rank0]: finalize_fn()
|
| 199 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 800, in finalize_fn
|
| 200 |
+
[rank0]: save_state_dict_async_finalize(*save_state_dict_ret)
|
| 201 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/state_dict_saver.py", line 243, in save_state_dict_async_finalize
|
| 202 |
+
[rank0]: storage_writer.finish(global_metadata, all_results)
|
| 203 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/filesystem_async.py", line 483, in finish
|
| 204 |
+
[rank0]: super().finish(metadata, results)
|
| 205 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/filesystem.py", line 697, in finish
|
| 206 |
+
[rank0]: with self.fs.create_stream(tmp_path, "wb") as metadata_file:
|
| 207 |
+
[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 208 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/contextlib.py", line 137, in __enter__
|
| 209 |
+
[rank0]: return next(self.gen)
|
| 210 |
+
[rank0]: ^^^^^^^^^^^^^^
|
| 211 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/filesystem.py", line 476, in create_stream
|
| 212 |
+
[rank0]: with path.open(mode) as stream:
|
| 213 |
+
[rank0]: ^^^^^^^^^^^^^^^
|
| 214 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/pathlib.py", line 1013, in open
|
| 215 |
+
[rank0]: return io.open(self, mode, buffering, encoding, errors, newline)
|
| 216 |
+
[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 217 |
+
[rank0]: FileNotFoundError: [Errno 2] No such file or directory: 'gpt-checkpoint/iter_0000010/.metadata.tmp'
|
attnserver.run_attnserver.slurm.sh.343262.out.log
ADDED
|
The diff for this file is too large to render.
See raw diff
|