Upload folder using huggingface_hub
- .gitattributes +1 -0
- attnserver.run_attnserver.slurm.sh.343188.err.log +0 -0
- attnserver.run_attnserver.slurm.sh.343188.out.log +1023 -0
- attnserver.run_attnserver.slurm.sh.343190.err.log +2 -2
- attnserver.run_attnserver.slurm.sh.343190.out.log +0 -0
- attnserver.run_attnserver.slurm.sh.343191.err.log +0 -0
- attnserver.run_attnserver.slurm.sh.343191.out.log +877 -0
- attnserver.run_attnserver.slurm.sh.343195.out.log +0 -0
- attnserver.run_attnserver.slurm.sh.343201.err.log +21 -0
- attnserver.run_attnserver.slurm.sh.343201.out.log +537 -0
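
For context, this commit was produced by the huggingface_hub folder-upload API. A minimal sketch of how such an upload can be issued is shown below; the repository id, local folder path, and repo type are placeholders for illustration, not values taken from this commit.

```python
# Minimal sketch of a folder upload with huggingface_hub (placeholder repo id and path).
from huggingface_hub import HfApi

api = HfApi()  # uses the token stored by `huggingface-cli login` by default
api.upload_folder(
    folder_path="./logs",                # local folder to upload (placeholder)
    repo_id="user/attnserver-logs",      # target repository (placeholder)
    repo_type="dataset",                 # or "model", depending on the repo
    commit_message="Upload folder using huggingface_hub",
)
```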
.gitattributes
CHANGED
|
@@ -58,3 +58,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 58 |
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 59 |
*.webm filter=lfs diff=lfs merge=lfs -text
|
| 60 |
attnserver.run_attnserver.slurm.sh.343190.err.log filter=lfs diff=lfs merge=lfs -text
|
| 61 |
+
attnserver.run_attnserver.slurm.sh.343191.err.log filter=lfs diff=lfs merge=lfs -text
|
attnserver.run_attnserver.slurm.sh.343188.err.log
CHANGED
|
The diff for this file is too large to render.
See raw diff
|
attnserver.run_attnserver.slurm.sh.343188.out.log
CHANGED
|
@@ -106577,3 +106577,1026 @@ DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks
|
|
| 106577 |
DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(104857600), 0), (np.int64(52428800), 1), (np.int64(46137344), 2), (np.int64(46137344), 3), (np.int64(41959936), 4), (np.int64(41959936), 5), (np.int64(44040192), 6), (np.int64(44040192), 7)]
|
| 106578 |
DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(104857600), 0), (np.int64(52428800), 1), (np.int64(46137344), 2), (np.int64(46137344), 3), (np.int64(41959936), 4), (np.int64(41959936), 5), (np.int64(44040192), 6), (np.int64(44040192), 7)]
|
| 106579 |
DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(104857600), 0), (np.int64(52428800), 1), (np.int64(46137344), 2), (np.int64(46137344), 3), (np.int64(41959936), 4), (np.int64(41959936), 5), (np.int64(44040192), 6), (np.int64(44040192), 7)]
|
| 106580 |
+
Running ctx_length=81920, TP_SIZE=8, CP_SIZE=8, BATCH_SIZE=1
|
| 106581 |
+
Cleaning up checkpoint directory: gpt-checkpoint
|
| 106582 |
+
Cleaning up checkpoint directory: gpt-checkpoint
|
| 106583 |
+
Cleaning up checkpoint directory: gpt-checkpoint
|
| 106584 |
+
Cleaning up checkpoint directory: gpt-checkpoint
|
| 106585 |
+
Cleaning up checkpoint directory: gpt-checkpoint
|
| 106586 |
+
--------------------------------
|
| 106587 |
+
CTX_LENGTH: 81920
|
| 106588 |
+
TP_SIZE: 8
|
| 106589 |
+
--------------------------------
|
| 106590 |
+
CTX_LENGTH: 81920
|
| 106591 |
+
TP_SIZE: 8
|
| 106592 |
+
CP_SIZE: 8
|
| 106593 |
+
CHECKPOINT_PATH: gpt-checkpoint
|
| 106594 |
+
CP_SIZE: 8
|
| 106595 |
+
CHECKPOINT_PATH: gpt-checkpoint
|
| 106596 |
+
PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
|
| 106597 |
+
--------------------------------
|
| 106598 |
+
CTX_LENGTH: 81920
|
| 106599 |
+
TP_SIZE: 8
|
| 106600 |
+
CP_SIZE: 8
|
| 106601 |
+
CHECKPOINT_PATH: gpt-checkpoint
|
| 106602 |
+
--------------------------------
|
| 106603 |
+
CTX_LENGTH: 81920
|
| 106604 |
+
TP_SIZE: 8
|
| 106605 |
+
CP_SIZE: 8
|
| 106606 |
+
PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
|
| 106607 |
+
--------------------------------
|
| 106608 |
+
--------------------------------
|
| 106609 |
+
--------------------------------
|
| 106610 |
+
CTX_LENGTH: 81920
|
| 106611 |
+
TP_SIZE: 8
|
| 106612 |
+
PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
|
| 106613 |
+
--------------------------------
|
| 106614 |
+
CHECKPOINT_PATH: gpt-checkpoint
|
| 106615 |
+
CP_SIZE: 8
|
| 106616 |
+
CHECKPOINT_PATH: gpt-checkpoint
|
| 106617 |
+
PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
|
| 106618 |
+
PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
|
| 106619 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
|
| 106620 |
+
--------------------------------
|
| 106621 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
|
| 106622 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
|
| 106623 |
+
--------------------------------
|
| 106624 |
+
Cleaning up checkpoint directory: gpt-checkpoint
|
| 106625 |
+
Cleaning up checkpoint directory: gpt-checkpoint
|
| 106626 |
+
--------------------------------
|
| 106627 |
+
CTX_LENGTH: 81920
|
| 106628 |
+
TP_SIZE: 8
|
| 106629 |
+
--------------------------------
|
| 106630 |
+
CTX_LENGTH: 81920
|
| 106631 |
+
TP_SIZE: 8
|
| 106632 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
|
| 106633 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
|
| 106634 |
+
CP_SIZE: 8
|
| 106635 |
+
CHECKPOINT_PATH: gpt-checkpoint
|
| 106636 |
+
Cleaning up checkpoint directory: gpt-checkpoint
|
| 106637 |
+
CP_SIZE: 8
|
| 106638 |
+
CHECKPOINT_PATH: gpt-checkpoint
|
| 106639 |
+
PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
|
| 106640 |
+
PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
|
| 106641 |
+
--------------------------------
|
| 106642 |
+
--------------------------------
|
| 106643 |
+
CTX_LENGTH: 81920
|
| 106644 |
+
TP_SIZE: 8
|
| 106645 |
+
CP_SIZE: 8
|
| 106646 |
+
--------------------------------
|
| 106647 |
+
CHECKPOINT_PATH: gpt-checkpoint
|
| 106648 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
|
| 106649 |
+
PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
|
| 106650 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
|
| 106651 |
+
--------------------------------
|
| 106652 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
|
| 106653 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 106654 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 106655 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 106656 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 106657 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 106658 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 106659 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 106660 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 106661 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 106662 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 106663 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 106664 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 106665 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 106666 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 106667 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 106668 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 106669 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 106670 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 106671 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 106672 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 106673 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 106674 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 106675 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 106676 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 106677 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 106678 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 106679 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 106680 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 106681 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 106682 |
+
WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written.
|
| 106683 |
+
WARNING: one_logger package is required to enable e2e metrics tracking. please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it
|
| 106684 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 106685 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 106686 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 106687 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 106688 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 106689 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 106690 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 106691 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 106692 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 106693 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 106694 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 106695 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 106696 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 106697 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 106698 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 106699 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 106700 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 106701 |
+
using world size: 64, data-parallel size: 1, context-parallel size: 8, hierarchical context-parallel sizes: Nonetensor-model-parallel size: 8, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0
|
| 106702 |
+
Number of virtual stages per pipeline stage: None
|
| 106703 |
+
WARNING: Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used
|
| 106704 |
+
using torch.float16 for parameters ...
|
| 106705 |
+
------------------------ arguments ------------------------
|
| 106706 |
+
account_for_embedding_in_pipeline_split ......... False
|
| 106707 |
+
account_for_loss_in_pipeline_split .............. False
|
| 106708 |
+
accumulate_allreduce_grads_in_fp32 .............. False
|
| 106709 |
+
adam_beta1 ...................................... 0.9
|
| 106710 |
+
adam_beta2 ...................................... 0.999
|
| 106711 |
+
adam_eps ........................................ 1e-08
|
| 106712 |
+
add_bias_linear ................................. True
|
| 106713 |
+
add_position_embedding .......................... True
|
| 106714 |
+
add_qkv_bias .................................... True
|
| 106715 |
+
adlr_autoresume ................................. False
|
| 106716 |
+
adlr_autoresume_interval ........................ 1000
|
| 106717 |
+
align_grad_reduce ............................... True
|
| 106718 |
+
align_param_gather .............................. False
|
| 106719 |
+
app_tag_run_name ................................ None
|
| 106720 |
+
app_tag_run_version ............................. 0.0.0
|
| 106721 |
+
apply_layernorm_1p .............................. False
|
| 106722 |
+
apply_query_key_layer_scaling ................... False
|
| 106723 |
+
apply_residual_connection_post_layernorm ........ False
|
| 106724 |
+
apply_rope_fusion ............................... False
|
| 106725 |
+
async_save ...................................... None
|
| 106726 |
+
async_tensor_model_parallel_allreduce ........... True
|
| 106727 |
+
attention_backend ............................... AttnBackend.auto
|
| 106728 |
+
attention_dropout ............................... 0.1
|
| 106729 |
+
attention_softmax_in_fp32 ....................... False
|
| 106730 |
+
auto_detect_ckpt_format ......................... False
|
| 106731 |
+
barrier_with_L1_time ............................ True
|
| 106732 |
+
bert_binary_head ................................ True
|
| 106733 |
+
bert_embedder_type .............................. megatron
|
| 106734 |
+
bert_load ....................................... None
|
| 106735 |
+
bf16 ............................................ False
|
| 106736 |
+
bias_dropout_fusion ............................. True
|
| 106737 |
+
bias_gelu_fusion ................................ True
|
| 106738 |
+
bias_swiglu_fusion .............................. True
|
| 106739 |
+
biencoder_projection_dim ........................ 0
|
| 106740 |
+
biencoder_shared_query_context_model ............ False
|
| 106741 |
+
block_data_path ................................. None
|
| 106742 |
+
calc_ft_timeouts ................................ False
|
| 106743 |
+
calculate_per_token_loss ........................ False
|
| 106744 |
+
check_for_large_grads ........................... False
|
| 106745 |
+
check_for_nan_in_loss_and_grad .................. False
|
| 106746 |
+
check_for_spiky_loss ............................ False
|
| 106747 |
+
check_weight_hash_across_dp_replicas_interval ... None
|
| 106748 |
+
ckpt_assume_constant_structure .................. False
|
| 106749 |
+
ckpt_convert_format ............................. None
|
| 106750 |
+
ckpt_convert_save ............................... None
|
| 106751 |
+
ckpt_convert_update_legacy_dist_opt_format ...... False
|
| 106752 |
+
ckpt_format ..................................... torch_dist
|
| 106753 |
+
ckpt_fully_parallel_load ........................ False
|
| 106754 |
+
ckpt_fully_parallel_save ........................ True
|
| 106755 |
+
ckpt_fully_parallel_save_deprecated ............. False
|
| 106756 |
+
ckpt_step ....................................... None
|
| 106757 |
+
classes_fraction ................................ 1.0
|
| 106758 |
+
clip_grad ....................................... 1.0
|
| 106759 |
+
clone_scatter_output_in_embedding ............... True
|
| 106760 |
+
config_logger_dir ...............................
|
| 106761 |
+
consumed_train_samples .......................... 0
|
| 106762 |
+
consumed_valid_samples .......................... 0
|
| 106763 |
+
context_parallel_size ........................... 8
|
| 106764 |
+
cp_comm_type .................................... ['p2p']
|
| 106765 |
+
create_attention_mask_in_dataloader ............. True
|
| 106766 |
+
cross_entropy_fusion_impl ....................... native
|
| 106767 |
+
cross_entropy_loss_fusion ....................... False
|
| 106768 |
+
cuda_graph_scope ................................ full
|
| 106769 |
+
cuda_graph_warmup_steps ......................... 3
|
| 106770 |
+
data_args_path .................................. None
|
| 106771 |
+
data_cache_path ................................. None
|
| 106772 |
+
data_parallel_random_init ....................... False
|
| 106773 |
+
data_parallel_sharding_strategy ................. no_shard
|
| 106774 |
+
data_parallel_size .............................. 1
|
| 106775 |
+
data_path ....................................... None
|
| 106776 |
+
data_per_class_fraction ......................... 1.0
|
| 106777 |
+
data_sharding ................................... True
|
| 106778 |
+
dataloader_type ................................. single
|
| 106779 |
+
ddp_average_in_collective ....................... False
|
| 106780 |
+
ddp_bucket_size ................................. None
|
| 106781 |
+
ddp_num_buckets ................................. None
|
| 106782 |
+
ddp_pad_buckets_for_high_nccl_busbw ............. False
|
| 106783 |
+
decoder_first_pipeline_num_layers ............... None
|
| 106784 |
+
decoder_last_pipeline_num_layers ................ None
|
| 106785 |
+
decoder_num_layers .............................. None
|
| 106786 |
+
decoder_seq_length .............................. None
|
| 106787 |
+
decoupled_lr .................................... None
|
| 106788 |
+
decoupled_min_lr ................................ None
|
| 106789 |
+
decrease_batch_size_if_needed ................... False
|
| 106790 |
+
defer_embedding_wgrad_compute ................... False
|
| 106791 |
+
deprecated_use_mcore_models ..................... False
|
| 106792 |
+
deterministic_mode .............................. False
|
| 106793 |
+
dino_bottleneck_size ............................ 256
|
| 106794 |
+
dino_freeze_last_layer .......................... 1
|
| 106795 |
+
dino_head_hidden_size ........................... 2048
|
| 106796 |
+
dino_local_crops_number ......................... 10
|
| 106797 |
+
dino_local_img_size ............................. 96
|
| 106798 |
+
dino_norm_last_layer ............................ False
|
| 106799 |
+
dino_teacher_temp ............................... 0.07
|
| 106800 |
+
dino_warmup_teacher_temp ........................ 0.04
|
| 106801 |
+
dino_warmup_teacher_temp_epochs ................. 30
|
| 106802 |
+
disable_bf16_reduced_precision_matmul ........... False
|
| 106803 |
+
disable_mamba_mem_eff_path ...................... False
|
| 106804 |
+
disable_straggler_on_startup .................... False
|
| 106805 |
+
dist_ckpt_format_deprecated ..................... None
|
| 106806 |
+
dist_ckpt_strictness ............................ assume_ok_unexpected
|
| 106807 |
+
distribute_saved_activations .................... False
|
| 106808 |
+
distributed_backend ............................. nccl
|
| 106809 |
+
distributed_timeout_minutes ..................... 10
|
| 106810 |
+
embedding_path .................................. None
|
| 106811 |
+
empty_unused_memory_level ....................... 0
|
| 106812 |
+
enable_cuda_graph ............................... False
|
| 106813 |
+
enable_ft_package ............................... False
|
| 106814 |
+
enable_gloo_process_groups ...................... True
|
| 106815 |
+
enable_msc ...................................... True
|
| 106816 |
+
enable_one_logger ............................... True
|
| 106817 |
+
encoder_num_layers .............................. 2
|
| 106818 |
+
encoder_pipeline_model_parallel_size ............ 0
|
| 106819 |
+
encoder_seq_length .............................. 81920
|
| 106820 |
+
encoder_tensor_model_parallel_size .............. 0
|
| 106821 |
+
end_weight_decay ................................ 0.1
|
| 106822 |
+
eod_mask_loss ................................... False
|
| 106823 |
+
error_injection_rate ............................ 0
|
| 106824 |
+
error_injection_type ............................ transient_error
|
| 106825 |
+
eval_interval ................................... 16
|
| 106826 |
+
eval_iters ...................................... 1
|
| 106827 |
+
evidence_data_path .............................. None
|
| 106828 |
+
exit_duration_in_mins ........................... None
|
| 106829 |
+
exit_interval ................................... None
|
| 106830 |
+
exit_on_missing_checkpoint ...................... False
|
| 106831 |
+
exit_signal_handler ............................. False
|
| 106832 |
+
exp_avg_dtype ................................... torch.float32
|
| 106833 |
+
exp_avg_sq_dtype ................................ torch.float32
|
| 106834 |
+
expert_model_parallel_size ...................... 1
|
| 106835 |
+
expert_tensor_parallel_size ..................... 8
|
| 106836 |
+
external_cuda_graph ............................. False
|
| 106837 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 106838 |
+
ffn_hidden_size ................................. 16384
|
| 106839 |
+
finetune ........................................ False
|
| 106840 |
+
first_last_layers_bf16 .......................... False
|
| 106841 |
+
flash_decode .................................... False
|
| 106842 |
+
fp16 ............................................ True
|
| 106843 |
+
fp16_lm_cross_entropy ........................... False
|
| 106844 |
+
fp32_residual_connection ........................ False
|
| 106845 |
+
fp8 ............................................. None
|
| 106846 |
+
fp8_amax_compute_algo ........................... most_recent
|
| 106847 |
+
fp8_amax_history_len ............................ 1
|
| 106848 |
+
fp8_interval .................................... 1
|
| 106849 |
+
fp8_margin ...................................... 0
|
| 106850 |
+
fp8_param_gather ................................ False
|
| 106851 |
+
fp8_recipe ...................................... delayed
|
| 106852 |
+
fp8_wgrad ....................................... True
|
| 106853 |
+
fsdp_double_buffer .............................. False
|
| 106854 |
+
global_batch_size ............................... 1
|
| 106855 |
+
grad_reduce_in_bf16 ............................. False
|
| 106856 |
+
gradient_accumulation_fusion .................... True
|
| 106857 |
+
gradient_reduce_div_fusion ...................... True
|
| 106858 |
+
group_query_attention ........................... True
|
| 106859 |
+
head_lr_mult .................................... 1.0
|
| 106860 |
+
heterogeneous_layers_config_encoded_json ........ None
|
| 106861 |
+
heterogeneous_layers_config_path ................ None
|
| 106862 |
+
hidden_dropout .................................. 0.1
|
| 106863 |
+
hidden_size ..................................... 4096
|
| 106864 |
+
hierarchical_context_parallel_sizes ............. None
|
| 106865 |
+
high_priority_stream_groups ..................... []
|
| 106866 |
+
hybrid_attention_ratio .......................... 0.0
|
| 106867 |
+
hybrid_mlp_ratio ................................ 0.0
|
| 106868 |
+
hybrid_override_pattern ......................... None
|
| 106869 |
+
hysteresis ...................................... 2
|
| 106870 |
+
ict_head_size ................................... None
|
| 106871 |
+
ict_load ........................................ None
|
| 106872 |
+
img_h ........................................... 224
|
| 106873 |
+
img_w ........................................... 224
|
| 106874 |
+
indexer_batch_size .............................. 128
|
| 106875 |
+
indexer_log_interval ............................ 1000
|
| 106876 |
+
inference_batch_times_seqlen_threshold .......... -1
|
| 106877 |
+
inference_dynamic_batching ...................... False
|
| 106878 |
+
inference_dynamic_batching_buffer_guaranteed_fraction 0.2
|
| 106879 |
+
inference_dynamic_batching_buffer_overflow_factor None
|
| 106880 |
+
inference_dynamic_batching_buffer_size_gb ....... 40.0
|
| 106881 |
+
inference_dynamic_batching_chunk_size ........... 256
|
| 106882 |
+
inference_dynamic_batching_max_requests_override None
|
| 106883 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 106884 |
+
inference_dynamic_batching_max_tokens_override .. None
|
| 106885 |
+
inference_max_batch_size ........................ 8
|
| 106886 |
+
inference_max_seq_length ........................ 2560
|
| 106887 |
+
inference_rng_tracker ........................... False
|
| 106888 |
+
init_method_std ................................. 0.02
|
| 106889 |
+
init_method_xavier_uniform ...................... False
|
| 106890 |
+
init_model_with_meta_device ..................... False
|
| 106891 |
+
initial_loss_scale .............................. 4294967296
|
| 106892 |
+
inprocess_active_world_size ..................... 64
|
| 106893 |
+
inprocess_barrier_timeout ....................... 120
|
| 106894 |
+
inprocess_completion_timeout .................... 120
|
| 106895 |
+
inprocess_empty_cuda_cache ...................... False
|
| 106896 |
+
inprocess_granularity ........................... node
|
| 106897 |
+
inprocess_hard_timeout .......................... 90
|
| 106898 |
+
inprocess_heartbeat_interval .................... 30
|
| 106899 |
+
inprocess_heartbeat_timeout ..................... 60
|
| 106900 |
+
inprocess_last_call_wait ........................ 1
|
| 106901 |
+
inprocess_max_iterations ........................ None
|
| 106902 |
+
inprocess_monitor_process_interval .............. 1.0
|
| 106903 |
+
inprocess_monitor_thread_interval ............... 1.0
|
| 106904 |
+
inprocess_progress_watchdog_interval ............ 1.0
|
| 106905 |
+
inprocess_restart ............................... False
|
| 106906 |
+
inprocess_soft_timeout .......................... 60
|
| 106907 |
+
inprocess_termination_grace_time ................ 1
|
| 106908 |
+
is_hybrid_model ................................. False
|
| 106909 |
+
iter_per_epoch .................................. 1250
|
| 106910 |
+
iterations_to_skip .............................. []
|
| 106911 |
+
keep_fp8_transpose_cache_when_using_custom_fsdp . False
|
| 106912 |
+
kv_channels ..................................... 64
|
| 106913 |
+
kv_lora_rank .................................... 32
|
| 106914 |
+
lazy_mpu_init ................................... None
|
| 106915 |
+
load ............................................ gpt-checkpoint
|
| 106916 |
+
load_model_opt_format ........................... False
|
| 106917 |
+
local_rank ...................................... 0
|
| 106918 |
+
log_interval .................................... 1
|
| 106919 |
+
log_loss_scale_to_tensorboard ................... True
|
| 106920 |
+
log_memory_to_tensorboard ....................... False
|
| 106921 |
+
log_num_zeros_in_grad ........................... False
|
| 106922 |
+
log_params_norm ................................. False
|
| 106923 |
+
log_progress .................................... False
|
| 106924 |
+
log_straggler ................................... False
|
| 106925 |
+
log_throughput .................................. False
|
| 106926 |
+
log_timers_to_tensorboard ....................... False
|
| 106927 |
+
log_validation_ppl_to_tensorboard ............... False
|
| 106928 |
+
log_world_size_to_tensorboard ................... False
|
| 106929 |
+
logging_level ................................... 0
|
| 106930 |
+
loss_scale ...................................... None
|
| 106931 |
+
loss_scale_window ............................... 1000
|
| 106932 |
+
lr .............................................. 0.0005
|
| 106933 |
+
lr_decay_iters .................................. 150000
|
| 106934 |
+
lr_decay_samples ................................ None
|
| 106935 |
+
lr_decay_style .................................. cosine
|
| 106936 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 106937 |
+
lr_warmup_fraction .............................. None
|
| 106938 |
+
lr_warmup_init .................................. 0.0
|
| 106939 |
+
lr_warmup_iters ................................. 2
|
| 106940 |
+
lr_warmup_samples ............................... 0
|
| 106941 |
+
lr_wsd_decay_iters .............................. None
|
| 106942 |
+
lr_wsd_decay_samples ............................ None
|
| 106943 |
+
lr_wsd_decay_style .............................. exponential
|
| 106944 |
+
main_grads_dtype ................................ torch.float32
|
| 106945 |
+
main_params_dtype ............................... torch.float32
|
| 106946 |
+
make_vocab_size_divisible_by .................... 128
|
| 106947 |
+
mamba_head_dim .................................. 64
|
| 106948 |
+
mamba_num_groups ................................ 8
|
| 106949 |
+
mamba_num_heads ................................. None
|
| 106950 |
+
mamba_state_dim ................................. 128
|
| 106951 |
+
manual_gc ....................................... False
|
| 106952 |
+
manual_gc_eval .................................. True
|
| 106953 |
+
manual_gc_interval .............................. 0
|
| 106954 |
+
mask_factor ..................................... 1.0
|
| 106955 |
+
mask_prob ....................................... 0.15
|
| 106956 |
+
mask_type ....................................... random
|
| 106957 |
+
masked_softmax_fusion ........................... True
|
| 106958 |
+
max_position_embeddings ......................... 81920
|
| 106959 |
+
max_tokens_to_oom ............................... 12000
|
| 106960 |
+
memory_snapshot_path ............................ snapshot.pickle
|
| 106961 |
+
merge_file ...................................... merges.txt
|
| 106962 |
+
micro_batch_size ................................ 1
|
| 106963 |
+
microbatch_group_size_per_vp_stage .............. None
|
| 106964 |
+
mid_level_dataset_surplus ....................... 0.005
|
| 106965 |
+
min_loss_scale .................................. 1.0
|
| 106966 |
+
min_lr .......................................... 0.0
|
| 106967 |
+
mlp_chunks_for_prefill .......................... 1
|
| 106968 |
+
mmap_bin_files .................................. True
|
| 106969 |
+
mock_data ....................................... True
|
| 106970 |
+
moe_apply_probs_on_input ........................ False
|
| 106971 |
+
moe_aux_loss_coeff .............................. 0.0
|
| 106972 |
+
moe_enable_deepep ............................... False
|
| 106973 |
+
moe_expert_capacity_factor ...................... None
|
| 106974 |
+
moe_extended_tp ................................. False
|
| 106975 |
+
moe_ffn_hidden_size ............................. None
|
| 106976 |
+
moe_grouped_gemm ................................ False
|
| 106977 |
+
moe_input_jitter_eps ............................ None
|
| 106978 |
+
moe_layer_freq .................................. 1
|
| 106979 |
+
moe_layer_recompute ............................. False
|
| 106980 |
+
moe_pad_expert_input_to_capacity ................ False
|
| 106981 |
+
moe_per_layer_logging ........................... False
|
| 106982 |
+
moe_permute_fusion .............................. False
|
| 106983 |
+
moe_router_bias_update_rate ..................... 0.001
|
| 106984 |
+
moe_router_dtype ................................ None
|
| 106985 |
+
moe_router_enable_expert_bias ................... False
|
| 106986 |
+
moe_router_force_load_balancing ................. False
|
| 106987 |
+
moe_router_group_topk ........................... None
|
| 106988 |
+
moe_router_load_balancing_type .................. aux_loss
|
| 106989 |
+
moe_router_num_groups ........................... None
|
| 106990 |
+
moe_router_padding_for_fp8 ...................... False
|
| 106991 |
+
moe_router_pre_softmax .......................... False
|
| 106992 |
+
moe_router_score_function ....................... softmax
|
| 106993 |
+
moe_router_topk ................................. 2
|
| 106994 |
+
moe_router_topk_scaling_factor .................. None
|
| 106995 |
+
moe_shared_expert_intermediate_size ............. None
|
| 106996 |
+
moe_shared_expert_overlap ....................... False
|
| 106997 |
+
moe_token_dispatcher_type ....................... allgather
|
| 106998 |
+
moe_token_drop_policy ........................... probs
|
| 106999 |
+
moe_use_legacy_grouped_gemm ..................... False
|
| 107000 |
+
moe_use_upcycling ............................... False
|
| 107001 |
+
moe_z_loss_coeff ................................ None
|
| 107002 |
+
mrope_section ................................... None
|
| 107003 |
+
mscale .......................................... 1.0
|
| 107004 |
+
mscale_all_dim .................................. 1.0
|
| 107005 |
+
mtp_loss_scaling_factor ......................... 0.1
|
| 107006 |
+
mtp_num_layers .................................. None
|
| 107007 |
+
multi_latent_attention .......................... False
|
| 107008 |
+
nccl_all_reduce_for_prefill ..................... False
|
| 107009 |
+
nccl_communicator_config_path ................... None
|
| 107010 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 107011 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 107012 |
+
nccl_ub ......................................... False
|
| 107013 |
+
no_load_optim ................................... None
|
| 107014 |
+
no_load_rng ..................................... None
|
| 107015 |
+
no_persist_layer_norm ........................... False
|
| 107016 |
+
no_rope_freq .................................... None
|
| 107017 |
+
no_save_optim ................................... None
|
| 107018 |
+
no_save_rng ..................................... None
|
| 107019 |
+
non_persistent_ckpt_type ........................ None
|
| 107020 |
+
non_persistent_global_ckpt_dir .................. None
|
| 107021 |
+
non_persistent_local_ckpt_algo .................. fully_parallel
|
| 107022 |
+
non_persistent_local_ckpt_dir ................... None
|
| 107023 |
+
non_persistent_save_interval .................... None
|
| 107024 |
+
norm_epsilon .................................... 1e-05
|
| 107025 |
+
normalization ................................... LayerNorm
|
| 107026 |
+
num_attention_heads ............................. 64
|
| 107027 |
+
num_channels .................................... 3
|
| 107028 |
+
num_classes ..................................... 1000
|
| 107029 |
+
num_dataset_builder_threads ..................... 1
|
| 107030 |
+
num_distributed_optimizer_instances ............. 1
|
| 107031 |
+
num_experts ..................................... None
|
| 107032 |
+
num_layers ...................................... 2
|
| 107033 |
+
num_layers_at_end_in_bf16 ....................... 1
|
| 107034 |
+
num_layers_at_start_in_bf16 ..................... 1
|
| 107035 |
+
num_layers_per_virtual_pipeline_stage ........... None
|
| 107036 |
+
num_query_groups ................................ 16
|
| 107037 |
+
num_virtual_stages_per_pipeline_rank ............ None
|
| 107038 |
+
num_workers ..................................... 2
|
| 107039 |
+
object_storage_cache_path ....................... None
|
| 107040 |
+
one_logger_async ................................ False
|
| 107041 |
+
one_logger_project .............................. megatron-lm
|
| 107042 |
+
one_logger_run_name ............................. None
|
| 107043 |
+
onnx_safe ....................................... None
|
| 107044 |
+
openai_gelu ..................................... False
|
| 107045 |
+
optimizer ....................................... adam
|
| 107046 |
+
optimizer_cpu_offload ........................... False
|
| 107047 |
+
optimizer_offload_fraction ...................... 1.0
|
| 107048 |
+
output_bert_embeddings .......................... False
|
| 107049 |
+
overlap_cpu_optimizer_d2h_h2d ................... False
|
| 107050 |
+
overlap_grad_reduce ............................. False
|
| 107051 |
+
overlap_p2p_comm ................................ False
|
| 107052 |
+
overlap_p2p_comm_warmup_flush ................... False
|
| 107053 |
+
overlap_param_gather ............................ False
|
| 107054 |
+
overlap_param_gather_with_optimizer_step ........ False
|
| 107055 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 107056 |
+
override_opt_param_scheduler .................... False
|
| 107057 |
+
params_dtype .................................... torch.float16
|
| 107058 |
+
patch_dim ....................................... 16
|
| 107059 |
+
per_split_data_args_path ........................ None
|
| 107060 |
+
perform_initialization .......................... True
|
| 107061 |
+
pin_cpu_grads ................................... True
|
| 107062 |
+
pin_cpu_params .................................. True
|
| 107063 |
+
pipeline_model_parallel_comm_backend ............ None
|
| 107064 |
+
pipeline_model_parallel_size .................... 1
|
| 107065 |
+
pipeline_model_parallel_split_rank .............. None
|
| 107066 |
+
position_embedding_type ......................... learned_absolute
|
| 107067 |
+
pretrained_checkpoint ........................... None
|
| 107068 |
+
profile ......................................... False
|
| 107069 |
+
profile_ranks ................................... [0]
|
| 107070 |
+
profile_step_end ................................ 12
|
| 107071 |
+
profile_step_start .............................. 10
|
| 107072 |
+
q_lora_rank ..................................... None
|
| 107073 |
+
qk_head_dim ..................................... 128
|
| 107074 |
+
qk_l2_norm ...................................... False
|
| 107075 |
+
qk_layernorm .................................... False
|
| 107076 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 107077 |
+
qk_pos_emb_head_dim ............................. 64
|
| 107078 |
+
query_in_block_prob ............................. 0.1
|
| 107079 |
+
rampup_batch_size ............................... None
|
| 107080 |
+
rank ............................................ 0
|
| 107081 |
+
recompute_granularity ........................... None
|
| 107082 |
+
recompute_method ................................ None
|
| 107083 |
+
recompute_modules ............................... None
|
| 107084 |
+
recompute_num_layers ............................ None
|
| 107085 |
+
record_memory_history ........................... False
|
| 107086 |
+
relative_attention_max_distance ................. 128
|
| 107087 |
+
relative_attention_num_buckets .................. 32
|
| 107088 |
+
replication ..................................... False
|
| 107089 |
+
replication_factor .............................. 2
|
| 107090 |
+
replication_jump ................................ None
|
| 107091 |
+
rerun_mode ...................................... disabled
|
| 107092 |
+
reset_attention_mask ............................ False
|
| 107093 |
+
reset_position_ids .............................. False
|
| 107094 |
+
result_rejected_tracker_filename ................ None
|
| 107095 |
+
retriever_report_topk_accuracies ................ []
|
| 107096 |
+
retriever_score_scaling ......................... False
|
| 107097 |
+
retriever_seq_length ............................ 256
|
| 107098 |
+
retro_add_retriever ............................. False
|
| 107099 |
+
retro_attention_gate ............................ 1
|
| 107100 |
+
retro_cyclic_train_iters ........................ None
|
| 107101 |
+
retro_encoder_attention_dropout ................. 0.1
|
| 107102 |
+
retro_encoder_hidden_dropout .................... 0.1
|
| 107103 |
+
retro_encoder_layers ............................ 2
|
| 107104 |
+
retro_num_neighbors ............................. 2
|
| 107105 |
+
retro_num_retrieved_chunks ...................... 2
|
| 107106 |
+
retro_project_dir ............................... None
|
| 107107 |
+
retro_verify_neighbor_count ..................... True
|
| 107108 |
+
rope_scaling_factor ............................. 8.0
|
| 107109 |
+
rotary_base ..................................... 10000
|
| 107110 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 107111 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 107112 |
+
rotary_interleaved .............................. False
|
| 107113 |
+
rotary_percent .................................. 1.0
|
| 107114 |
+
rotary_scaling_factor ........................... 1.0
|
| 107115 |
+
rotary_seq_len_interpolation_factor ............. None
|
| 107116 |
+
run_workload_inspector_server ................... False
|
| 107117 |
+
sample_rate ..................................... 1.0
|
| 107118 |
+
save ............................................ gpt-checkpoint
|
| 107119 |
+
save_interval ................................... 16
|
| 107120 |
+
scatter_gather_tensors_in_pipeline .............. True
|
| 107121 |
+
seed ............................................ 1234
|
| 107122 |
+
seq_length ...................................... 81920
|
| 107123 |
+
sequence_parallel ............................... False
|
| 107124 |
+
sgd_momentum .................................... 0.9
|
| 107125 |
+
short_seq_prob .................................. 0.1
|
| 107126 |
+
skip_train ...................................... False
|
| 107127 |
+
skipped_train_samples ........................... 0
|
| 107128 |
+
spec ............................................ None
|
| 107129 |
+
split ........................................... None
|
| 107130 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 107131 |
+
squared_relu .................................... False
|
| 107132 |
+
start_weight_decay .............................. 0.1
|
| 107133 |
+
straggler_ctrlr_port ............................ 65535
|
| 107134 |
+
straggler_minmax_count .......................... 1
|
| 107135 |
+
suggested_communication_unit_size ............... None
|
| 107136 |
+
swiglu .......................................... False
|
| 107137 |
+
swin_backbone_type .............................. tiny
|
| 107138 |
+
symmetric_ar_type ............................... None
|
| 107139 |
+
te_rng_tracker .................................. False
|
| 107140 |
+
tensor_model_parallel_size ...................... 8
|
| 107141 |
+
tensorboard_dir ................................. tensorboard-logs/
|
| 107142 |
+
tensorboard_log_interval ........................ 1
|
| 107143 |
+
tensorboard_queue_size .......................... 1000
|
| 107144 |
+
test_data_path .................................. None
|
| 107145 |
+
test_mode ....................................... False
|
| 107146 |
+
tiktoken_num_special_tokens ..................... 1000
|
| 107147 |
+
tiktoken_pattern ................................ None
|
| 107148 |
+
tiktoken_special_tokens ......................... None
|
| 107149 |
+
timing_log_level ................................ 0
|
| 107150 |
+
timing_log_option ............................... minmax
|
| 107151 |
+
titles_data_path ................................ None
|
| 107152 |
+
tokenizer_model ................................. None
|
| 107153 |
+
tokenizer_type .................................. GPT2BPETokenizer
|
| 107154 |
+
torch_fsdp2_reshard_after_forward ............... True
|
| 107155 |
+
tp_comm_bootstrap_backend ....................... nccl
|
| 107156 |
+
tp_comm_bulk_dgrad .............................. True
|
| 107157 |
+
tp_comm_bulk_wgrad .............................. True
|
| 107158 |
+
tp_comm_overlap ................................. False
|
| 107159 |
+
tp_comm_overlap_ag .............................. True
|
| 107160 |
+
tp_comm_overlap_cfg ............................. None
|
| 107161 |
+
tp_comm_overlap_rs .............................. True
|
| 107162 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 107163 |
+
tp_comm_overlap_rs_dgrad ........................ False
|
| 107164 |
+
tp_comm_split_ag ................................ True
|
| 107165 |
+
tp_comm_split_rs ................................ True
|
| 107166 |
+
train_data_path ................................. None
|
| 107167 |
+
train_iters ..................................... 10
|
| 107168 |
+
train_samples ................................... None
|
| 107169 |
+
train_sync_interval ............................. None
|
| 107170 |
+
transformer_impl ................................ transformer_engine
|
| 107171 |
+
transformer_pipeline_model_parallel_size ........ 1
|
| 107172 |
+
untie_embeddings_and_output_weights ............. False
|
| 107173 |
+
use_checkpoint_args ............................. False
|
| 107174 |
+
use_checkpoint_opt_param_scheduler .............. False
|
| 107175 |
+
use_cpu_initialization .......................... None
|
| 107176 |
+
use_custom_fsdp ................................. False
|
| 107177 |
+
use_dist_ckpt ................................... True
|
| 107178 |
+
use_dist_ckpt_deprecated ........................ False
|
| 107179 |
+
use_distributed_optimizer ....................... False
|
| 107180 |
+
use_flash_attn .................................. False
|
| 107181 |
+
use_legacy_models ............................... False
|
| 107182 |
+
use_mp_args_from_checkpoint_args ................ False
|
| 107183 |
+
use_one_sent_docs ............................... False
|
| 107184 |
+
use_persistent_ckpt_worker ...................... False
|
| 107185 |
+
use_precision_aware_optimizer ................... False
|
| 107186 |
+
use_pytorch_profiler ............................ False
|
| 107187 |
+
use_ring_exchange_p2p ........................... False
|
| 107188 |
+
use_rope_scaling ................................ False
|
| 107189 |
+
use_rotary_position_embeddings .................. False
|
| 107190 |
+
use_sharp ....................................... False
|
| 107191 |
+
use_tokenizer_model_from_checkpoint_args ........ True
|
| 107192 |
+
use_torch_fsdp2 ................................. False
|
| 107193 |
+
use_torch_optimizer_for_cpu_offload ............. False
|
| 107194 |
+
use_tp_pp_dp_mapping ............................ False
|
| 107195 |
+
v_head_dim ...................................... 128
|
| 107196 |
+
valid_data_path ................................. None
|
| 107197 |
+
variable_seq_lengths ............................ False
|
| 107198 |
+
virtual_pipeline_model_parallel_size ............ None
|
| 107199 |
+
vision_backbone_type ............................ vit
|
| 107200 |
+
vision_pretraining .............................. False
|
| 107201 |
+
vision_pretraining_type ......................... classify
|
| 107202 |
+
vocab_extra_ids ................................. 0
|
| 107203 |
+
vocab_file ...................................... vocab.json
|
| 107204 |
+
vocab_size ...................................... None
|
| 107205 |
+
wandb_exp_name ..................................
|
| 107206 |
+
wandb_project ...................................
|
| 107207 |
+
wandb_save_dir ..................................
|
| 107208 |
+
weight_decay .................................... 0.1
|
| 107209 |
+
weight_decay_incr_style ......................... constant
|
| 107210 |
+
wgrad_deferral_limit ............................ 0
|
| 107211 |
+
world_size ...................................... 64
|
| 107212 |
+
yaml_cfg ........................................ None
|
| 107213 |
+
-------------------- end of arguments ---------------------
|
| 107214 |
+
INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1
|
| 107215 |
+
> building GPT2BPETokenizer tokenizer ...
|
| 107216 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 107217 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 107218 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 107219 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 107220 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 107221 |
+
> padded vocab (size: 50257) with 943 dummy tokens (new size: 51200)
|
| 107222 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 107223 |
+
WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED
|
| 107224 |
+
> initializing torch distributed ...
|
| 107225 |
+
> initialized tensor model parallel with size 8
|
| 107226 |
+
> initialized pipeline model parallel with size 1
|
| 107227 |
+
> setting random seeds to 1234 ...
|
| 107228 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 107229 |
+
> compiling dataset index builder ...
|
| 107230 |
+
make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets'
|
| 107231 |
+
make: Nothing to be done for 'default'.
|
| 107232 |
+
make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets'
|
| 107233 |
+
>>> done with dataset index builder. Compilation time: 0.045 seconds
|
| 107234 |
+
WARNING: constraints for invoking optimized fused softmax kernel are not met. We default back to unfused kernel invocations.
|
| 107235 |
+
> compiling and loading fused kernels ...
|
| 107236 |
+
>>> done with compiling and loading fused kernels. Compilation time: 2.847 seconds
|
| 107237 |
+
time to initialize megatron (seconds): 9.096
|
| 107238 |
+
[after megatron is initialized] datetime: 2025-06-21 20:28:50
|
| 107239 |
+
building GPT model ...
|
| 107240 |
+
>>> embedding
|
| 107241 |
+
>>> decoder
|
| 107242 |
+
>>> output_layer
|
| 107243 |
+
> number of parameters on (tensor, pipeline) model parallel rank (5, 0): 405861888
|
| 107244 |
+
>>> embedding
|
| 107245 |
+
>>> decoder
|
| 107246 |
+
>>> output_layer
|
| 107247 |
+
> number of parameters on (tensor, pipeline) model parallel rank (0, 0): 405861888
|
| 107248 |
+
>>> embedding
|
| 107249 |
+
>>> decoder
|
| 107250 |
+
>>> output_layer
|
| 107251 |
+
> number of parameters on (tensor, pipeline) model parallel rank (4, 0): 405861888
|
| 107252 |
+
>>> embedding
|
| 107253 |
+
>>> decoder
|
| 107254 |
+
>>> output_layer
|
| 107255 |
+
> number of parameters on (tensor, pipeline) model parallel rank (6, 0): 405861888
|
| 107256 |
+
>>> embedding
|
| 107257 |
+
>>> decoder
|
| 107258 |
+
>>> output_layer
|
| 107259 |
+
> number of parameters on (tensor, pipeline) model parallel rank (0, 0): 405861888
|
| 107260 |
+
>>> embedding
|
| 107261 |
+
>>> decoder
|
| 107262 |
+
>>> output_layer
|
| 107263 |
+
> number of parameters on (tensor, pipeline) model parallel rank (2, 0): 405861888
|
| 107264 |
+
>>> embedding
|
| 107265 |
+
>>> decoder
|
| 107266 |
+
>>> output_layer
|
| 107267 |
+
> number of parameters on (tensor, pipeline) model parallel rank (6, 0): 405861888
|
| 107268 |
+
>>> embedding
|
| 107269 |
+
>>> decoder
|
| 107270 |
+
>>> output_layer
|
| 107271 |
+
> number of parameters on (tensor, pipeline) model parallel rank (5, 0): 405861888
|
| 107272 |
+
>>> embedding
|
| 107273 |
+
>>> decoder
|
| 107274 |
+
>>> output_layer
|
| 107275 |
+
> number of parameters on (tensor, pipeline) model parallel rank (0, 0): 405861888
|
| 107276 |
+
>>> embedding
|
| 107277 |
+
>>> decoder
|
| 107278 |
+
>>> output_layer
|
| 107279 |
+
> number of parameters on (tensor, pipeline) model parallel rank (1, 0): 405861888
|
| 107280 |
+
>>> embedding
|
| 107281 |
+
>>> decoder
|
| 107282 |
+
>>> output_layer
|
| 107283 |
+
> number of parameters on (tensor, pipeline) model parallel rank (1, 0): 405861888
|
| 107284 |
+
>>> embedding
|
| 107285 |
+
>>> decoder
|
| 107286 |
+
>>> output_layer
|
| 107287 |
+
> number of parameters on (tensor, pipeline) model parallel rank (7, 0): 405861888
|
| 107288 |
+
>>> embedding
|
| 107289 |
+
>>> decoder
|
| 107290 |
+
>>> output_layer
|
| 107291 |
+
> number of parameters on (tensor, pipeline) model parallel rank (2, 0): 405861888
|
| 107292 |
+
>>> embedding
|
| 107293 |
+
>>> decoder
|
| 107294 |
+
>>> output_layer
|
| 107295 |
+
> number of parameters on (tensor, pipeline) model parallel rank (1, 0): 405861888
|
| 107296 |
+
>>> embedding
| 107297 |
+
>>> decoder
|
| 107298 |
+
>>> output_layer
|
| 107299 |
+
> number of parameters on (tensor, pipeline) model parallel rank (0, 0): 405861888
|
| 107300 |
+
>>> embedding
|
| 107301 |
+
>>> decoder
|
| 107302 |
+
>>> output_layer
|
| 107303 |
+
> number of parameters on (tensor, pipeline) model parallel rank (6, 0): 405861888
|
| 107304 |
+
>>> embedding
|
| 107305 |
+
>>> decoder
|
| 107306 |
+
>>> output_layer
|
| 107307 |
+
> number of parameters on (tensor, pipeline) model parallel rank (4, 0): 405861888
|
| 107308 |
+
>>> embedding
|
| 107309 |
+
>>> decoder
|
| 107310 |
+
>>> output_layer
|
| 107311 |
+
> number of parameters on (tensor, pipeline) model parallel rank (3, 0): 405861888
|
| 107312 |
+
>>> embedding
|
| 107313 |
+
>>> decoder
|
| 107314 |
+
>>> output_layer
|
| 107315 |
+
> number of parameters on (tensor, pipeline) model parallel rank (7, 0): 405861888
|
| 107316 |
+
>>> embedding
|
| 107317 |
+
>>> decoder
|
| 107318 |
+
>>> output_layer
|
| 107319 |
+
>>> embedding
|
| 107320 |
+
>>> decoder
|
| 107321 |
+
>>> output_layer
|
| 107322 |
+
> number of parameters on (tensor, pipeline) model parallel rank (2, 0): 405861888
|
| 107323 |
+
> number of parameters on (tensor, pipeline) model parallel rank (4, 0): 405861888
|
| 107324 |
+
>>> embedding
|
| 107325 |
+
>>> decoder
|
| 107326 |
+
>>> output_layer
|
| 107327 |
+
> number of parameters on (tensor, pipeline) model parallel rank (5, 0): 405861888
|
| 107328 |
+
>>> embedding
|
| 107329 |
+
>>> decoder
|
| 107330 |
+
>>> output_layer
|
| 107331 |
+
> number of parameters on (tensor, pipeline) model parallel rank (2, 0): 405861888
|
| 107332 |
+
>>> embedding
|
| 107333 |
+
>>> decoder
|
| 107334 |
+
>>> output_layer
|
| 107335 |
+
> number of parameters on (tensor, pipeline) model parallel rank (3, 0): 405861888
|
| 107336 |
+
>>> embedding
|
| 107337 |
+
>>> decoder
|
| 107338 |
+
>>> output_layer
|
| 107339 |
+
> number of parameters on (tensor, pipeline) model parallel rank (1, 0): 405861888
|
| 107340 |
+
>>> embedding
|
| 107341 |
+
>>> decoder
|
| 107342 |
+
>>> output_layer
|
| 107343 |
+
> number of parameters on (tensor, pipeline) model parallel rank (7, 0): 405861888
|
| 107344 |
+
>>> embedding
|
| 107345 |
+
>>> decoder
|
| 107346 |
+
>>> output_layer
|
| 107347 |
+
> number of parameters on (tensor, pipeline) model parallel rank (2, 0): 405861888
|
| 107348 |
+
>>> embedding
|
| 107349 |
+
>>> decoder
|
| 107350 |
+
>>> output_layer
|
| 107351 |
+
> number of parameters on (tensor, pipeline) model parallel rank (7, 0): 405861888
|
| 107352 |
+
>>> embedding
|
| 107353 |
+
>>> decoder
|
| 107354 |
+
>>> output_layer
|
| 107355 |
+
>>> embedding
|
| 107356 |
+
>>> decoder
|
| 107357 |
+
>>> output_layer
|
| 107358 |
+
> number of parameters on (tensor, pipeline) model parallel rank (0, 0): 405861888
|
| 107359 |
+
> number of parameters on (tensor, pipeline) model parallel rank (5, 0): 405861888
|
| 107360 |
+
>>> embedding
|
| 107361 |
+
>>> decoder
|
| 107362 |
+
>>> output_layer
|
| 107363 |
+
> number of parameters on (tensor, pipeline) model parallel rank (0, 0): 405861888
|
| 107364 |
+
>>> embedding
|
| 107365 |
+
>>> decoder
|
| 107366 |
+
>>> output_layer
|
| 107367 |
+
> number of parameters on (tensor, pipeline) model parallel rank (6, 0): 405861888
|
| 107368 |
+
>>> embedding
|
| 107369 |
+
>>> decoder
|
| 107370 |
+
>>> output_layer
|
| 107371 |
+
> number of parameters on (tensor, pipeline) model parallel rank (7, 0): 405861888
|
| 107372 |
+
>>> embedding
|
| 107373 |
+
>>> decoder
|
| 107374 |
+
>>> output_layer
|
| 107375 |
+
> number of parameters on (tensor, pipeline) model parallel rank (6, 0): 405861888
|
| 107376 |
+
>>> embedding
|
| 107377 |
+
>>> decoder
|
| 107378 |
+
>>> output_layer
|
| 107379 |
+
> number of parameters on (tensor, pipeline) model parallel rank (4, 0): 405861888
|
| 107380 |
+
>>> embedding
|
| 107381 |
+
>>> decoder
|
| 107382 |
+
>>> output_layer
|
| 107383 |
+
>>> embedding
|
| 107384 |
+
>>> decoder
|
| 107385 |
+
>>> output_layer
|
| 107386 |
+
>>> embedding
|
| 107387 |
+
>>> decoder
|
| 107388 |
+
>>> output_layer
|
| 107389 |
+
> number of parameters on (tensor, pipeline) model parallel rank (5, 0): 405861888
|
| 107390 |
+
> number of parameters on (tensor, pipeline) model parallel rank (4, 0): 405861888
|
| 107391 |
+
> number of parameters on (tensor, pipeline) model parallel rank (4, 0): 405861888
|
| 107392 |
+
>>> embedding
|
| 107393 |
+
>>> decoder
|
| 107394 |
+
>>> output_layer
|
| 107395 |
+
>>> embedding
|
| 107396 |
+
>>> decoder
|
| 107397 |
+
>>> output_layer
|
| 107398 |
+
> number of parameters on (tensor, pipeline) model parallel rank (2, 0): 405861888
|
| 107399 |
+
> number of parameters on (tensor, pipeline) model parallel rank (7, 0): 405861888
|
| 107400 |
+
>>> embedding
|
| 107401 |
+
>>> decoder
|
| 107402 |
+
>>> output_layer
|
| 107403 |
+
>>> embedding
|
| 107404 |
+
>>> decoder
|
| 107405 |
+
>>> output_layer
|
| 107406 |
+
> number of parameters on (tensor, pipeline) model parallel rank (6, 0): 405861888
|
| 107407 |
+
> number of parameters on (tensor, pipeline) model parallel rank (7, 0): 405861888
|
| 107408 |
+
>>> embedding
|
| 107409 |
+
>>> decoder
|
| 107410 |
+
>>> output_layer
|
| 107411 |
+
> number of parameters on (tensor, pipeline) model parallel rank (3, 0): 405861888
|
| 107412 |
+
>>> embedding
|
| 107413 |
+
>>> decoder
|
| 107414 |
+
>>> output_layer
|
| 107415 |
+
> number of parameters on (tensor, pipeline) model parallel rank (2, 0): 405861888
|
| 107416 |
+
>>> embedding
|
| 107417 |
+
>>> decoder
|
| 107418 |
+
>>> output_layer
|
| 107419 |
+
> number of parameters on (tensor, pipeline) model parallel rank (3, 0): 405861888
|
| 107420 |
+
>>> embedding
|
| 107421 |
+
>>> decoder
|
| 107422 |
+
>>> output_layer
|
| 107423 |
+
> number of parameters on (tensor, pipeline) model parallel rank (3, 0): 405861888
|
| 107424 |
+
>>> embedding
|
| 107425 |
+
>>> decoder
|
| 107426 |
+
>>> output_layer
|
| 107427 |
+
> number of parameters on (tensor, pipeline) model parallel rank (1, 0): 405861888
|
| 107428 |
+
>>> embedding
|
| 107429 |
+
>>> decoder
|
| 107430 |
+
>>> output_layer
|
| 107431 |
+
> number of parameters on (tensor, pipeline) model parallel rank (1, 0): 405861888
|
| 107432 |
+
>>> embedding
|
| 107433 |
+
>>> decoder
|
| 107434 |
+
>>> output_layer
|
| 107435 |
+
> number of parameters on (tensor, pipeline) model parallel rank (5, 0): 405861888
|
| 107436 |
+
>>> embedding
|
| 107437 |
+
>>> decoder
|
| 107438 |
+
>>> output_layer
|
| 107439 |
+
> number of parameters on (tensor, pipeline) model parallel rank (1, 0): 405861888
|
| 107440 |
+
>>> embedding
|
| 107441 |
+
>>> decoder
|
| 107442 |
+
>>> output_layer
|
| 107443 |
+
> number of parameters on (tensor, pipeline) model parallel rank (2, 0): 405861888
|
| 107444 |
+
>>> embedding
|
| 107445 |
+
>>> decoder
|
| 107446 |
+
>>> output_layer
|
| 107447 |
+
> number of parameters on (tensor, pipeline) model parallel rank (4, 0): 405861888
|
| 107448 |
+
>>> embedding
|
| 107449 |
+
>>> decoder
|
| 107450 |
+
>>> output_layer
|
| 107451 |
+
> number of parameters on (tensor, pipeline) model parallel rank (5, 0): 405861888
|
| 107452 |
+
>>> embedding
|
| 107453 |
+
>>> decoder
|
| 107454 |
+
>>> output_layer
|
| 107455 |
+
>>> embedding
|
| 107456 |
+
>>> decoder
|
| 107457 |
+
>>> output_layer
|
| 107458 |
+
> number of parameters on (tensor, pipeline) model parallel rank (7, 0): 405861888
|
| 107459 |
+
> number of parameters on (tensor, pipeline) model parallel rank (6, 0): 405861888
|
| 107460 |
+
>>> embedding
|
| 107461 |
+
>>> decoder
|
| 107462 |
+
>>> output_layer
|
| 107463 |
+
> number of parameters on (tensor, pipeline) model parallel rank (5, 0): 405861888
|
| 107464 |
+
>>> embedding
|
| 107465 |
+
>>> decoder
|
| 107466 |
+
>>> output_layer
|
| 107467 |
+
> number of parameters on (tensor, pipeline) model parallel rank (4, 0): 405861888
|
| 107468 |
+
INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False)
|
| 107469 |
+
INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1
|
| 107470 |
+
Params for bucket 1 (405861888 elements, 405861888 padded size):
|
| 107471 |
+
module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias
|
| 107472 |
+
module.decoder.layers.0.mlp.linear_fc2.weight
|
| 107473 |
+
module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias
|
| 107474 |
+
module.decoder.final_layernorm.weight
|
| 107475 |
+
module.decoder.layers.1.mlp.linear_fc1.bias
|
| 107476 |
+
module.decoder.layers.0.mlp.linear_fc2.bias
|
| 107477 |
+
module.decoder.layers.1.self_attention.linear_qkv.weight
|
| 107478 |
+
module.decoder.layers.1.self_attention.linear_proj.weight
|
| 107479 |
+
module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight
|
| 107480 |
+
module.decoder.layers.0.self_attention.linear_qkv.bias
|
| 107481 |
+
module.embedding.word_embeddings.weight
|
| 107482 |
+
module.decoder.layers.0.mlp.linear_fc1.weight
|
| 107483 |
+
module.decoder.layers.1.mlp.linear_fc2.weight
|
| 107484 |
+
module.decoder.layers.1.self_attention.linear_proj.bias
|
| 107485 |
+
module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight
|
| 107486 |
+
module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias
|
| 107487 |
+
module.decoder.layers.0.self_attention.linear_proj.weight
|
| 107488 |
+
module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight
|
| 107489 |
+
module.decoder.layers.1.self_attention.linear_qkv.bias
|
| 107490 |
+
module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias
|
| 107491 |
+
module.decoder.layers.0.self_attention.linear_proj.bias
|
| 107492 |
+
module.decoder.layers.1.mlp.linear_fc1.weight
|
| 107493 |
+
module.decoder.layers.0.mlp.linear_fc1.bias
|
| 107494 |
+
module.embedding.position_embeddings.weight
|
| 107495 |
+
module.decoder.final_layernorm.bias
|
| 107496 |
+
module.decoder.layers.1.mlp.linear_fc2.bias
|
| 107497 |
+
module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight
|
| 107498 |
+
module.decoder.layers.0.self_attention.linear_qkv.weight
|
| 107499 |
+
INFO:megatron.core.optimizer:Setting up optimizer with config OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=<megatron.core.timers.Timers object at 0x14cd927a5df0>, config_logger_dir='')
|
| 107500 |
+
INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine
|
| 107501 |
+
>>> embedding
|
| 107502 |
+
>>> decoder
|
| 107503 |
+
>>> output_layer
|
| 107504 |
+
> number of parameters on (tensor, pipeline) model parallel rank (1, 0): 405861888
|
| 107505 |
+
>>> embedding
|
| 107506 |
+
>>> decoder
|
| 107507 |
+
>>> output_layer
|
| 107508 |
+
> number of parameters on (tensor, pipeline) model parallel rank (0, 0): 405861888
|
| 107509 |
+
>>> embedding
|
| 107510 |
+
>>> decoder
|
| 107511 |
+
>>> output_layer
|
| 107512 |
+
> number of parameters on (tensor, pipeline) model parallel rank (0, 0): 405861888
|
| 107513 |
+
>>> embedding
|
| 107514 |
+
>>> decoder
|
| 107515 |
+
>>> output_layer
|
| 107516 |
+
> number of parameters on (tensor, pipeline) model parallel rank (3, 0): 405861888
|
| 107517 |
+
>>> embedding
|
| 107518 |
+
>>> decoder
|
| 107519 |
+
>>> output_layer
|
| 107520 |
+
> number of parameters on (tensor, pipeline) model parallel rank (3, 0): 405861888
|
| 107521 |
+
>>> embedding
|
| 107522 |
+
>>> decoder
|
| 107523 |
+
>>> output_layer
|
| 107524 |
+
> number of parameters on (tensor, pipeline) model parallel rank (3, 0): 405861888
|
| 107525 |
+
>>> embedding
|
| 107526 |
+
>>> decoder
|
| 107527 |
+
>>> output_layer
|
| 107528 |
+
> number of parameters on (tensor, pipeline) model parallel rank (6, 0): 405861888
|
| 107529 |
+
loading distributed checkpoint from gpt-checkpoint at iteration 10
|
| 107530 |
+
Running ctx_length=98304, TP_SIZE=8, CP_SIZE=8, BATCH_SIZE=1
|
| 107531 |
+
Cleaning up checkpoint directory: gpt-checkpoint
|
| 107532 |
+
Cleaning up checkpoint directory: gpt-checkpoint
|
| 107533 |
+
Cleaning up checkpoint directory: gpt-checkpoint
|
| 107534 |
+
--------------------------------
|
| 107535 |
+
CTX_LENGTH: 98304
|
| 107536 |
+
TP_SIZE: 8
|
| 107537 |
+
CP_SIZE: 8
|
| 107538 |
+
--------------------------------
|
| 107539 |
+
CTX_LENGTH: 98304
|
| 107540 |
+
TP_SIZE: 8
|
| 107541 |
+
CHECKPOINT_PATH: gpt-checkpoint
|
| 107542 |
+
CP_SIZE: 8
|
| 107543 |
+
CHECKPOINT_PATH: gpt-checkpoint
|
| 107544 |
+
PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
|
| 107545 |
+
PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
|
| 107546 |
+
--------------------------------
|
| 107547 |
+
CTX_LENGTH: 98304
|
| 107548 |
+
TP_SIZE: 8
|
| 107549 |
+
CP_SIZE: 8
|
| 107550 |
+
CHECKPOINT_PATH: gpt-checkpoint
|
| 107551 |
+
--------------------------------
|
| 107552 |
+
Cleaning up checkpoint directory: gpt-checkpoint
|
| 107553 |
+
--------------------------------
|
| 107554 |
+
PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
|
| 107555 |
+
--------------------------------
|
| 107556 |
+
Cleaning up checkpoint directory: gpt-checkpoint
|
| 107557 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
|
| 107558 |
+
--------------------------------
|
| 107559 |
+
CTX_LENGTH: 98304
|
| 107560 |
+
TP_SIZE: 8
|
| 107561 |
+
CP_SIZE: 8
|
| 107562 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
|
| 107563 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
|
| 107564 |
+
CHECKPOINT_PATH: gpt-checkpoint
|
| 107565 |
+
PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
|
| 107566 |
+
--------------------------------
|
| 107567 |
+
CTX_LENGTH: 98304
|
| 107568 |
+
TP_SIZE: 8
|
| 107569 |
+
CP_SIZE: 8
|
| 107570 |
+
CHECKPOINT_PATH: gpt-checkpoint
|
| 107571 |
+
--------------------------------
|
| 107572 |
+
PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
|
| 107573 |
+
--------------------------------
|
| 107574 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
|
| 107575 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
|
| 107576 |
+
Cleaning up checkpoint directory: gpt-checkpoint
|
| 107577 |
+
--------------------------------
|
| 107578 |
+
CTX_LENGTH: 98304
|
| 107579 |
+
TP_SIZE: 8
|
| 107580 |
+
CP_SIZE: 8
|
| 107581 |
+
CHECKPOINT_PATH: gpt-checkpoint
|
| 107582 |
+
PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
|
| 107583 |
+
--------------------------------
|
| 107584 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
|
| 107585 |
+
Cleaning up checkpoint directory: gpt-checkpoint
|
| 107586 |
+
--------------------------------
|
| 107587 |
+
CTX_LENGTH: 98304
|
| 107588 |
+
TP_SIZE: 8
|
| 107589 |
+
CP_SIZE: 8
|
| 107590 |
+
CHECKPOINT_PATH: gpt-checkpoint
|
| 107591 |
+
PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
|
| 107592 |
+
--------------------------------
|
| 107593 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
|
| 107594 |
+
Cleaning up checkpoint directory: gpt-checkpoint
|
| 107595 |
+
--------------------------------
|
| 107596 |
+
CTX_LENGTH: 98304
|
| 107597 |
+
TP_SIZE: 8
|
| 107598 |
+
CP_SIZE: 8
|
| 107599 |
+
CHECKPOINT_PATH: gpt-checkpoint
|
| 107600 |
+
PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
|
| 107601 |
+
--------------------------------
|
| 107602 |
+
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
|
attnserver.run_attnserver.slurm.sh.343190.err.log
CHANGED
|
@@ -1,3 +1,3 @@
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:
|
| 3 |
-
size
|
|
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:e225814f9cd1acd91de38727cfd656a1fe6cb11631fea0c0c4dc4651e5587941
|
| 3 |
+
size 25988084
|
attnserver.run_attnserver.slurm.sh.343190.out.log
CHANGED
|
The diff for this file is too large to render.
See raw diff
|
|
|
attnserver.run_attnserver.slurm.sh.343191.err.log
CHANGED
|
The diff for this file is too large to render.
See raw diff
|
|
|
attnserver.run_attnserver.slurm.sh.343191.out.log
CHANGED
|
@@ -77131,3 +77131,880 @@ CHECKPOINT_PATH: gpt-checkpoint
|
|
| 77131 |
PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
|
| 77132 |
--------------------------------
|
| 77133 |
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
|
| 77134 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77135 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77136 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77137 |
+
using world size: 64, data-parallel size: 1, context-parallel size: 8, hierarchical context-parallel sizes: None, tensor-model-parallel size: 8, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0
|
| 77138 |
+
Number of virtual stages per pipeline stage: None
|
| 77139 |
+
WARNING: Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used
|
| 77140 |
+
using torch.float16 for parameters ...
|
| 77141 |
+
------------------------ arguments ------------------------
|
| 77142 |
+
account_for_embedding_in_pipeline_split ......... False
|
| 77143 |
+
account_for_loss_in_pipeline_split .............. False
|
| 77144 |
+
accumulate_allreduce_grads_in_fp32 .............. False
|
| 77145 |
+
adam_beta1 ...................................... 0.9
|
| 77146 |
+
adam_beta2 ...................................... 0.999
|
| 77147 |
+
adam_eps ........................................ 1e-08
|
| 77148 |
+
add_bias_linear ................................. True
|
| 77149 |
+
add_position_embedding .......................... True
|
| 77150 |
+
add_qkv_bias .................................... True
|
| 77151 |
+
adlr_autoresume ................................. False
|
| 77152 |
+
adlr_autoresume_interval ........................ 1000
|
| 77153 |
+
align_grad_reduce ............................... True
|
| 77154 |
+
align_param_gather .............................. False
|
| 77155 |
+
app_tag_run_name ................................ None
|
| 77156 |
+
app_tag_run_version ............................. 0.0.0
|
| 77157 |
+
apply_layernorm_1p .............................. False
|
| 77158 |
+
apply_query_key_layer_scaling ................... False
|
| 77159 |
+
apply_residual_connection_post_layernorm ........ False
|
| 77160 |
+
apply_rope_fusion ............................... False
|
| 77161 |
+
async_save ...................................... None
|
| 77162 |
+
async_tensor_model_parallel_allreduce ........... True
|
| 77163 |
+
attention_backend ............................... AttnBackend.auto
|
| 77164 |
+
attention_dropout ............................... 0.1
|
| 77165 |
+
attention_softmax_in_fp32 ....................... False
|
| 77166 |
+
auto_detect_ckpt_format ......................... False
|
| 77167 |
+
barrier_with_L1_time ............................ True
|
| 77168 |
+
bert_binary_head ................................ True
|
| 77169 |
+
bert_embedder_type .............................. megatron
|
| 77170 |
+
bert_load ....................................... None
|
| 77171 |
+
bf16 ............................................ False
|
| 77172 |
+
bias_dropout_fusion ............................. True
|
| 77173 |
+
bias_gelu_fusion ................................ True
|
| 77174 |
+
bias_swiglu_fusion .............................. True
|
| 77175 |
+
biencoder_projection_dim ........................ 0
|
| 77176 |
+
biencoder_shared_query_context_model ............ False
|
| 77177 |
+
block_data_path ................................. None
|
| 77178 |
+
calc_ft_timeouts ................................ False
|
| 77179 |
+
calculate_per_token_loss ........................ False
|
| 77180 |
+
check_for_large_grads ........................... False
|
| 77181 |
+
check_for_nan_in_loss_and_grad .................. False
|
| 77182 |
+
check_for_spiky_loss ............................ False
|
| 77183 |
+
check_weight_hash_across_dp_replicas_interval ... None
|
| 77184 |
+
ckpt_assume_constant_structure .................. False
|
| 77185 |
+
ckpt_convert_format ............................. None
|
| 77186 |
+
ckpt_convert_save ............................... None
|
| 77187 |
+
ckpt_convert_update_legacy_dist_opt_format ...... False
|
| 77188 |
+
ckpt_format ..................................... torch_dist
|
| 77189 |
+
ckpt_fully_parallel_load ........................ False
|
| 77190 |
+
ckpt_fully_parallel_save ........................ True
|
| 77191 |
+
ckpt_fully_parallel_save_deprecated ............. False
|
| 77192 |
+
ckpt_step ....................................... None
|
| 77193 |
+
classes_fraction ................................ 1.0
|
| 77194 |
+
clip_grad ....................................... 1.0
|
| 77195 |
+
clone_scatter_output_in_embedding ............... True
|
| 77196 |
+
config_logger_dir ...............................
|
| 77197 |
+
consumed_train_samples .......................... 0
|
| 77198 |
+
consumed_valid_samples .......................... 0
|
| 77199 |
+
context_parallel_size ........................... 8
|
| 77200 |
+
cp_comm_type .................................... ['p2p']
|
| 77201 |
+
create_attention_mask_in_dataloader ............. True
|
| 77202 |
+
cross_entropy_fusion_impl ....................... native
|
| 77203 |
+
cross_entropy_loss_fusion ....................... False
|
| 77204 |
+
cuda_graph_scope ................................ full
|
| 77205 |
+
cuda_graph_warmup_steps ......................... 3
|
| 77206 |
+
data_args_path .................................. None
|
| 77207 |
+
data_cache_path ................................. None
|
| 77208 |
+
data_parallel_random_init ....................... False
|
| 77209 |
+
data_parallel_sharding_strategy ................. no_shard
|
| 77210 |
+
data_parallel_size .............................. 1
|
| 77211 |
+
data_path ....................................... None
|
| 77212 |
+
data_per_class_fraction ......................... 1.0
|
| 77213 |
+
data_sharding ................................... True
|
| 77214 |
+
dataloader_type ................................. single
|
| 77215 |
+
ddp_average_in_collective ....................... False
|
| 77216 |
+
ddp_bucket_size ................................. None
|
| 77217 |
+
ddp_num_buckets ................................. None
|
| 77218 |
+
ddp_pad_buckets_for_high_nccl_busbw ............. False
|
| 77219 |
+
decoder_first_pipeline_num_layers ............... None
|
| 77220 |
+
decoder_last_pipeline_num_layers ................ None
|
| 77221 |
+
decoder_num_layers .............................. None
|
| 77222 |
+
decoder_seq_length .............................. None
|
| 77223 |
+
decoupled_lr .................................... None
|
| 77224 |
+
decoupled_min_lr ................................ None
|
| 77225 |
+
decrease_batch_size_if_needed ................... False
|
| 77226 |
+
defer_embedding_wgrad_compute ................... False
|
| 77227 |
+
deprecated_use_mcore_models ..................... False
|
| 77228 |
+
deterministic_mode .............................. False
|
| 77229 |
+
dino_bottleneck_size ............................ 256
|
| 77230 |
+
dino_freeze_last_layer .......................... 1
|
| 77231 |
+
dino_head_hidden_size ........................... 2048
|
| 77232 |
+
dino_local_crops_number ......................... 10
|
| 77233 |
+
dino_local_img_size ............................. 96
|
| 77234 |
+
dino_norm_last_layer ............................ False
|
| 77235 |
+
dino_teacher_temp ............................... 0.07
|
| 77236 |
+
dino_warmup_teacher_temp ........................ 0.04
|
| 77237 |
+
dino_warmup_teacher_temp_epochs ................. 30
|
| 77238 |
+
disable_bf16_reduced_precision_matmul ........... False
|
| 77239 |
+
disable_mamba_mem_eff_path ...................... False
|
| 77240 |
+
disable_straggler_on_startup .................... False
|
| 77241 |
+
dist_ckpt_format_deprecated ..................... None
|
| 77242 |
+
dist_ckpt_strictness ............................ assume_ok_unexpected
|
| 77243 |
+
distribute_saved_activations .................... False
|
| 77244 |
+
distributed_backend ............................. nccl
|
| 77245 |
+
distributed_timeout_minutes ..................... 10
|
| 77246 |
+
embedding_path .................................. None
|
| 77247 |
+
empty_unused_memory_level ....................... 0
|
| 77248 |
+
enable_cuda_graph ............................... False
|
| 77249 |
+
enable_ft_package ............................... False
|
| 77250 |
+
enable_gloo_process_groups ...................... True
|
| 77251 |
+
enable_msc ...................................... True
|
| 77252 |
+
enable_one_logger ............................... True
|
| 77253 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77254 |
+
encoder_num_layers .............................. 2
|
| 77255 |
+
encoder_pipeline_model_parallel_size ............ 0
|
| 77256 |
+
encoder_seq_length .............................. 131072
|
| 77257 |
+
encoder_tensor_model_parallel_size .............. 0
|
| 77258 |
+
end_weight_decay ................................ 0.1
|
| 77259 |
+
eod_mask_loss ................................... False
|
| 77260 |
+
error_injection_rate ............................ 0
|
| 77261 |
+
error_injection_type ............................ transient_error
|
| 77262 |
+
eval_interval ................................... 16
|
| 77263 |
+
eval_iters ...................................... 1
|
| 77264 |
+
evidence_data_path .............................. None
|
| 77265 |
+
exit_duration_in_mins ........................... None
|
| 77266 |
+
exit_interval ................................... None
|
| 77267 |
+
exit_on_missing_checkpoint ...................... False
|
| 77268 |
+
exit_signal_handler ............................. False
|
| 77269 |
+
exp_avg_dtype ................................... torch.float32
|
| 77270 |
+
exp_avg_sq_dtype ................................ torch.float32
|
| 77271 |
+
expert_model_parallel_size ...................... 1
|
| 77272 |
+
expert_tensor_parallel_size ..................... 8
|
| 77273 |
+
external_cuda_graph ............................. False
|
| 77274 |
+
ffn_hidden_size ................................. 16384
|
| 77275 |
+
finetune ........................................ False
|
| 77276 |
+
first_last_layers_bf16 .......................... False
|
| 77277 |
+
flash_decode .................................... False
|
| 77278 |
+
fp16 ............................................ True
|
| 77279 |
+
fp16_lm_cross_entropy ........................... False
|
| 77280 |
+
fp32_residual_connection ........................ False
|
| 77281 |
+
fp8 ............................................. None
|
| 77282 |
+
fp8_amax_compute_algo ........................... most_recent
|
| 77283 |
+
fp8_amax_history_len ............................ 1
|
| 77284 |
+
fp8_interval .................................... 1
|
| 77285 |
+
fp8_margin ...................................... 0
|
| 77286 |
+
fp8_param_gather ................................ False
|
| 77287 |
+
fp8_recipe ...................................... delayed
|
| 77288 |
+
fp8_wgrad ....................................... True
|
| 77289 |
+
fsdp_double_buffer .............................. False
|
| 77290 |
+
global_batch_size ............................... 1
|
| 77291 |
+
grad_reduce_in_bf16 ............................. False
|
| 77292 |
+
gradient_accumulation_fusion .................... True
|
| 77293 |
+
gradient_reduce_div_fusion ...................... True
|
| 77294 |
+
group_query_attention ........................... True
|
| 77295 |
+
head_lr_mult .................................... 1.0
|
| 77296 |
+
heterogeneous_layers_config_encoded_json ........ None
|
| 77297 |
+
heterogeneous_layers_config_path ................ None
|
| 77298 |
+
hidden_dropout .................................. 0.1
|
| 77299 |
+
hidden_size ..................................... 4096
|
| 77300 |
+
hierarchical_context_parallel_sizes ............. None
|
| 77301 |
+
high_priority_stream_groups ..................... []
|
| 77302 |
+
hybrid_attention_ratio .......................... 0.0
|
| 77303 |
+
hybrid_mlp_ratio ................................ 0.0
|
| 77304 |
+
hybrid_override_pattern ......................... None
|
| 77305 |
+
hysteresis ...................................... 2
|
| 77306 |
+
ict_head_size ................................... None
|
| 77307 |
+
ict_load ........................................ None
|
| 77308 |
+
img_h ........................................... 224
|
| 77309 |
+
img_w ........................................... 224
|
| 77310 |
+
indexer_batch_size .............................. 128
|
| 77311 |
+
indexer_log_interval ............................ 1000
|
| 77312 |
+
inference_batch_times_seqlen_threshold .......... -1
|
| 77313 |
+
inference_dynamic_batching ...................... False
|
| 77314 |
+
inference_dynamic_batching_buffer_guaranteed_fraction 0.2
|
| 77315 |
+
inference_dynamic_batching_buffer_overflow_factor None
|
| 77316 |
+
inference_dynamic_batching_buffer_size_gb ....... 40.0
|
| 77317 |
+
inference_dynamic_batching_chunk_size ........... 256
|
| 77318 |
+
inference_dynamic_batching_max_requests_override None
|
| 77319 |
+
inference_dynamic_batching_max_tokens_override .. None
|
| 77320 |
+
inference_max_batch_size ........................ 8
|
| 77321 |
+
inference_max_seq_length ........................ 2560
|
| 77322 |
+
inference_rng_tracker ........................... False
|
| 77323 |
+
init_method_std ................................. 0.02
|
| 77324 |
+
init_method_xavier_uniform ...................... False
|
| 77325 |
+
init_model_with_meta_device ..................... False
|
| 77326 |
+
initial_loss_scale .............................. 4294967296
|
| 77327 |
+
inprocess_active_world_size ..................... 64
|
| 77328 |
+
inprocess_barrier_timeout ....................... 120
|
| 77329 |
+
inprocess_completion_timeout .................... 120
|
| 77330 |
+
inprocess_empty_cuda_cache ...................... False
|
| 77331 |
+
inprocess_granularity ........................... node
|
| 77332 |
+
inprocess_hard_timeout .......................... 90
|
| 77333 |
+
inprocess_heartbeat_interval .................... 30
|
| 77334 |
+
inprocess_heartbeat_timeout ..................... 60
|
| 77335 |
+
inprocess_last_call_wait ........................ 1
|
| 77336 |
+
inprocess_max_iterations ........................ None
|
| 77337 |
+
inprocess_monitor_process_interval .............. 1.0
|
| 77338 |
+
inprocess_monitor_thread_interval ............... 1.0
|
| 77339 |
+
inprocess_progress_watchdog_interval ............ 1.0
|
| 77340 |
+
inprocess_restart ............................... False
|
| 77341 |
+
inprocess_soft_timeout .......................... 60
|
| 77342 |
+
inprocess_termination_grace_time ................ 1
|
| 77343 |
+
is_hybrid_model ................................. False
|
| 77344 |
+
iter_per_epoch .................................. 1250
|
| 77345 |
+
iterations_to_skip .............................. []
|
| 77346 |
+
keep_fp8_transpose_cache_when_using_custom_fsdp . False
|
| 77347 |
+
kv_channels ..................................... 64
|
| 77348 |
+
kv_lora_rank .................................... 32
|
| 77349 |
+
lazy_mpu_init ................................... None
|
| 77350 |
+
load ............................................ gpt-checkpoint
|
| 77351 |
+
load_model_opt_format ........................... False
|
| 77352 |
+
local_rank ...................................... 0
|
| 77353 |
+
log_interval .................................... 1
|
| 77354 |
+
log_loss_scale_to_tensorboard ................... True
|
| 77355 |
+
log_memory_to_tensorboard ....................... False
|
| 77356 |
+
log_num_zeros_in_grad ........................... False
|
| 77357 |
+
log_params_norm ................................. False
|
| 77358 |
+
log_progress .................................... False
|
| 77359 |
+
log_straggler ................................... False
|
| 77360 |
+
log_throughput .................................. False
|
| 77361 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77362 |
+
log_timers_to_tensorboard ....................... False
|
| 77363 |
+
log_validation_ppl_to_tensorboard ............... False
|
| 77364 |
+
log_world_size_to_tensorboard ................... False
|
| 77365 |
+
logging_level ................................... 0
|
| 77366 |
+
loss_scale ...................................... None
|
| 77367 |
+
loss_scale_window ............................... 1000
|
| 77368 |
+
lr .............................................. 0.0005
|
| 77369 |
+
lr_decay_iters .................................. 150000
|
| 77370 |
+
lr_decay_samples ................................ None
|
| 77371 |
+
lr_decay_style .................................. cosine
|
| 77372 |
+
lr_warmup_fraction .............................. None
|
| 77373 |
+
lr_warmup_init .................................. 0.0
|
| 77374 |
+
lr_warmup_iters ................................. 2
|
| 77375 |
+
lr_warmup_samples ............................... 0
|
| 77376 |
+
lr_wsd_decay_iters .............................. None
|
| 77377 |
+
lr_wsd_decay_samples ............................ None
|
| 77378 |
+
lr_wsd_decay_style .............................. exponential
|
| 77379 |
+
main_grads_dtype ................................ torch.float32
|
| 77380 |
+
main_params_dtype ............................... torch.float32
|
| 77381 |
+
make_vocab_size_divisible_by .................... 128
|
| 77382 |
+
mamba_head_dim .................................. 64
|
| 77383 |
+
mamba_num_groups ................................ 8
|
| 77384 |
+
mamba_num_heads ................................. None
|
| 77385 |
+
mamba_state_dim ................................. 128
|
| 77386 |
+
manual_gc ....................................... False
|
| 77387 |
+
manual_gc_eval .................................. True
|
| 77388 |
+
manual_gc_interval .............................. 0
|
| 77389 |
+
mask_factor ..................................... 1.0
|
| 77390 |
+
mask_prob ....................................... 0.15
|
| 77391 |
+
mask_type ....................................... random
|
| 77392 |
+
masked_softmax_fusion ........................... True
|
| 77393 |
+
max_position_embeddings ......................... 131072
|
| 77394 |
+
max_tokens_to_oom ............................... 12000
|
| 77395 |
+
memory_snapshot_path ............................ snapshot.pickle
|
| 77396 |
+
merge_file ...................................... merges.txt
|
| 77397 |
+
micro_batch_size ................................ 1
|
| 77398 |
+
microbatch_group_size_per_vp_stage .............. None
|
| 77399 |
+
mid_level_dataset_surplus ....................... 0.005
|
| 77400 |
+
min_loss_scale .................................. 1.0
|
| 77401 |
+
min_lr .......................................... 0.0
|
| 77402 |
+
mlp_chunks_for_prefill .......................... 1
|
| 77403 |
+
mmap_bin_files .................................. True
|
| 77404 |
+
mock_data ....................................... True
|
| 77405 |
+
moe_apply_probs_on_input ........................ False
|
| 77406 |
+
moe_aux_loss_coeff .............................. 0.0
|
| 77407 |
+
moe_enable_deepep ............................... False
|
| 77408 |
+
moe_expert_capacity_factor ...................... None
|
| 77409 |
+
moe_extended_tp ................................. False
|
| 77410 |
+
moe_ffn_hidden_size ............................. None
|
| 77411 |
+
moe_grouped_gemm ................................ False
|
| 77412 |
+
moe_input_jitter_eps ............................ None
|
| 77413 |
+
moe_layer_freq .................................. 1
|
| 77414 |
+
moe_layer_recompute ............................. False
|
| 77415 |
+
moe_pad_expert_input_to_capacity ................ False
|
| 77416 |
+
moe_per_layer_logging ........................... False
|
| 77417 |
+
moe_permute_fusion .............................. False
|
| 77418 |
+
moe_router_bias_update_rate ..................... 0.001
|
| 77419 |
+
moe_router_dtype ................................ None
|
| 77420 |
+
moe_router_enable_expert_bias ................... False
|
| 77421 |
+
moe_router_force_load_balancing ................. False
|
| 77422 |
+
moe_router_group_topk ........................... None
|
| 77423 |
+
moe_router_load_balancing_type .................. aux_loss
|
| 77424 |
+
moe_router_num_groups ........................... None
|
| 77425 |
+
moe_router_padding_for_fp8 ...................... False
|
| 77426 |
+
moe_router_pre_softmax .......................... False
|
| 77427 |
+
moe_router_score_function ....................... softmax
|
| 77428 |
+
moe_router_topk ................................. 2
|
| 77429 |
+
moe_router_topk_scaling_factor .................. None
|
| 77430 |
+
moe_shared_expert_intermediate_size ............. None
|
| 77431 |
+
moe_shared_expert_overlap ....................... False
|
| 77432 |
+
moe_token_dispatcher_type ....................... allgather
|
| 77433 |
+
moe_token_drop_policy ........................... probs
|
| 77434 |
+
moe_use_legacy_grouped_gemm ..................... False
|
| 77435 |
+
moe_use_upcycling ............................... False
|
| 77436 |
+
moe_z_loss_coeff ................................ None
|
| 77437 |
+
mrope_section ................................... None
|
| 77438 |
+
mscale .......................................... 1.0
|
| 77439 |
+
mscale_all_dim .................................. 1.0
|
| 77440 |
+
mtp_loss_scaling_factor ......................... 0.1
|
| 77441 |
+
mtp_num_layers .................................. None
|
| 77442 |
+
multi_latent_attention .......................... False
|
| 77443 |
+
nccl_all_reduce_for_prefill ..................... False
|
| 77444 |
+
nccl_communicator_config_path ................... None
|
| 77445 |
+
nccl_ub ......................................... False
|
| 77446 |
+
no_load_optim ................................... None
|
| 77447 |
+
no_load_rng ..................................... None
|
| 77448 |
+
no_persist_layer_norm ........................... False
|
| 77449 |
+
no_rope_freq .................................... None
|
| 77450 |
+
no_save_optim ................................... None
|
| 77451 |
+
no_save_rng ..................................... None
|
| 77452 |
+
non_persistent_ckpt_type ........................ None
|
| 77453 |
+
non_persistent_global_ckpt_dir .................. None
|
| 77454 |
+
non_persistent_local_ckpt_algo .................. fully_parallel
|
| 77455 |
+
non_persistent_local_ckpt_dir ................... None
|
| 77456 |
+
non_persistent_save_interval .................... None
|
| 77457 |
+
norm_epsilon .................................... 1e-05
|
| 77458 |
+
normalization ................................... LayerNorm
|
| 77459 |
+
num_attention_heads ............................. 64
|
| 77460 |
+
num_channels .................................... 3
|
| 77461 |
+
num_classes ..................................... 1000
|
| 77462 |
+
num_dataset_builder_threads ..................... 1
|
| 77463 |
+
num_distributed_optimizer_instances ............. 1
|
| 77464 |
+
num_experts ..................................... None
|
| 77465 |
+
num_layers ...................................... 2
|
| 77466 |
+
num_layers_at_end_in_bf16 ....................... 1
|
| 77467 |
+
num_layers_at_start_in_bf16 ..................... 1
|
| 77468 |
+
num_layers_per_virtual_pipeline_stage ........... None
|
| 77469 |
+
num_query_groups ................................ 16
|
| 77470 |
+
num_virtual_stages_per_pipeline_rank ............ None
|
| 77471 |
+
num_workers ..................................... 2
|
| 77472 |
+
object_storage_cache_path ....................... None
|
| 77473 |
+
one_logger_async ................................ False
|
| 77474 |
+
one_logger_project .............................. megatron-lm
|
| 77475 |
+
one_logger_run_name ............................. None
|
| 77476 |
+
onnx_safe ....................................... None
|
| 77477 |
+
openai_gelu ..................................... False
|
| 77478 |
+
optimizer ....................................... adam
|
| 77479 |
+
optimizer_cpu_offload ........................... False
|
| 77480 |
+
optimizer_offload_fraction ...................... 1.0
|
| 77481 |
+
output_bert_embeddings .......................... False
|
| 77482 |
+
overlap_cpu_optimizer_d2h_h2d ................... False
|
| 77483 |
+
overlap_grad_reduce ............................. False
|
| 77484 |
+
overlap_p2p_comm ................................ False
|
| 77485 |
+
overlap_p2p_comm_warmup_flush ................... False
|
| 77486 |
+
overlap_param_gather ............................ False
|
| 77487 |
+
overlap_param_gather_with_optimizer_step ........ False
|
| 77488 |
+
override_opt_param_scheduler .................... False
|
| 77489 |
+
params_dtype .................................... torch.float16
|
| 77490 |
+
patch_dim ....................................... 16
|
| 77491 |
+
per_split_data_args_path ........................ None
|
| 77492 |
+
perform_initialization .......................... True
|
| 77493 |
+
pin_cpu_grads ................................... True
|
| 77494 |
+
pin_cpu_params .................................. True
|
| 77495 |
+
pipeline_model_parallel_comm_backend ............ None
|
| 77496 |
+
pipeline_model_parallel_size .................... 1
|
| 77497 |
+
pipeline_model_parallel_split_rank .............. None
|
| 77498 |
+
position_embedding_type ......................... learned_absolute
|
| 77499 |
+
pretrained_checkpoint ........................... None
|
| 77500 |
+
profile ......................................... False
|
| 77501 |
+
profile_ranks ................................... [0]
|
| 77502 |
+
profile_step_end ................................ 12
|
| 77503 |
+
profile_step_start .............................. 10
|
| 77504 |
+
q_lora_rank ..................................... None
|
| 77505 |
+
qk_head_dim ..................................... 128
|
| 77506 |
+
qk_l2_norm ...................................... False
|
| 77507 |
+
qk_layernorm .................................... False
|
| 77508 |
+
qk_pos_emb_head_dim ............................. 64
|
| 77509 |
+
query_in_block_prob ............................. 0.1
|
| 77510 |
+
rampup_batch_size ............................... None
|
| 77511 |
+
rank ............................................ 0
|
| 77512 |
+
recompute_granularity ........................... None
|
| 77513 |
+
recompute_method ................................ None
|
| 77514 |
+
recompute_modules ............................... None
|
| 77515 |
+
recompute_num_layers ............................ None
|
| 77516 |
+
record_memory_history ........................... False
|
| 77517 |
+
relative_attention_max_distance ................. 128
|
| 77518 |
+
relative_attention_num_buckets .................. 32
|
| 77519 |
+
replication ..................................... False
|
| 77520 |
+
replication_factor .............................. 2
|
| 77521 |
+
replication_jump ................................ None
|
| 77522 |
+
rerun_mode ...................................... disabled
|
| 77523 |
+
reset_attention_mask ............................ False
|
| 77524 |
+
reset_position_ids .............................. False
|
| 77525 |
+
result_rejected_tracker_filename ................ None
|
| 77526 |
+
retriever_report_topk_accuracies ................ []
|
| 77527 |
+
retriever_score_scaling ......................... False
|
| 77528 |
+
retriever_seq_length ............................ 256
|
| 77529 |
+
retro_add_retriever ............................. False
|
| 77530 |
+
retro_attention_gate ............................ 1
|
| 77531 |
+
retro_cyclic_train_iters ........................ None
|
| 77532 |
+
retro_encoder_attention_dropout ................. 0.1
|
| 77533 |
+
retro_encoder_hidden_dropout .................... 0.1
|
| 77534 |
+
retro_encoder_layers ............................ 2
|
| 77535 |
+
retro_num_neighbors ............................. 2
|
| 77536 |
+
retro_num_retrieved_chunks ...................... 2
|
| 77537 |
+
retro_project_dir ............................... None
|
| 77538 |
+
retro_verify_neighbor_count ..................... True
|
| 77539 |
+
rope_scaling_factor ............................. 8.0
|
| 77540 |
+
rotary_base ..................................... 10000
|
| 77541 |
+
rotary_interleaved .............................. False
|
| 77542 |
+
rotary_percent .................................. 1.0
|
| 77543 |
+
rotary_scaling_factor ........................... 1.0
|
| 77544 |
+
rotary_seq_len_interpolation_factor ............. None
|
| 77545 |
+
run_workload_inspector_server ................... False
|
| 77546 |
+
sample_rate ..................................... 1.0
|
| 77547 |
+
save ............................................ gpt-checkpoint
|
| 77548 |
+
save_interval ................................... 16
|
| 77549 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77550 |
+
scatter_gather_tensors_in_pipeline .............. True
|
| 77551 |
+
seed ............................................ 1234
|
| 77552 |
+
seq_length ...................................... 131072
|
| 77553 |
+
sequence_parallel ............................... False
|
| 77554 |
+
sgd_momentum .................................... 0.9
|
| 77555 |
+
short_seq_prob .................................. 0.1
|
| 77556 |
+
skip_train ...................................... False
|
| 77557 |
+
skipped_train_samples ........................... 0
|
| 77558 |
+
spec ............................................ None
|
| 77559 |
+
split ........................................... None
|
| 77560 |
+
squared_relu .................................... False
|
| 77561 |
+
start_weight_decay .............................. 0.1
|
| 77562 |
+
straggler_ctrlr_port ............................ 65535
|
| 77563 |
+
straggler_minmax_count .......................... 1
|
| 77564 |
+
suggested_communication_unit_size ............... None
|
| 77565 |
+
swiglu .......................................... False
|
| 77566 |
+
swin_backbone_type .............................. tiny
|
| 77567 |
+
symmetric_ar_type ............................... None
|
| 77568 |
+
te_rng_tracker .................................. False
|
| 77569 |
+
tensor_model_parallel_size ...................... 8
|
| 77570 |
+
tensorboard_dir ................................. tensorboard-logs/
|
| 77571 |
+
tensorboard_log_interval ........................ 1
|
| 77572 |
+
tensorboard_queue_size .......................... 1000
|
| 77573 |
+
test_data_path .................................. None
|
| 77574 |
+
test_mode ....................................... False
|
| 77575 |
+
tiktoken_num_special_tokens ..................... 1000
|
| 77576 |
+
tiktoken_pattern ................................ None
|
| 77577 |
+
tiktoken_special_tokens ......................... None
|
| 77578 |
+
timing_log_level ................................ 0
|
| 77579 |
+
timing_log_option ............................... minmax
|
| 77580 |
+
titles_data_path ................................ None
|
| 77581 |
+
tokenizer_model ................................. None
|
| 77582 |
+
tokenizer_type .................................. GPT2BPETokenizer
|
| 77583 |
+
torch_fsdp2_reshard_after_forward ............... True
|
| 77584 |
+
tp_comm_bootstrap_backend ....................... nccl
|
| 77585 |
+
tp_comm_bulk_dgrad .............................. True
|
| 77586 |
+
tp_comm_bulk_wgrad .............................. True
|
| 77587 |
+
tp_comm_overlap ................................. False
|
| 77588 |
+
tp_comm_overlap_ag .............................. True
|
| 77589 |
+
tp_comm_overlap_cfg ............................. None
|
| 77590 |
+
tp_comm_overlap_rs .............................. True
|
| 77591 |
+
tp_comm_overlap_rs_dgrad ........................ False
|
| 77592 |
+
tp_comm_split_ag ................................ True
|
| 77593 |
+
tp_comm_split_rs ................................ True
|
| 77594 |
+
train_data_path ................................. None
|
| 77595 |
+
train_iters ..................................... 10
|
| 77596 |
+
train_samples ................................... None
|
| 77597 |
+
train_sync_interval ............................. None
|
| 77598 |
+
transformer_impl ................................ transformer_engine
|
| 77599 |
+
transformer_pipeline_model_parallel_size ........ 1
|
| 77600 |
+
untie_embeddings_and_output_weights ............. False
|
| 77601 |
+
use_checkpoint_args ............................. False
|
| 77602 |
+
use_checkpoint_opt_param_scheduler .............. False
|
| 77603 |
+
use_cpu_initialization .......................... None
|
| 77604 |
+
use_custom_fsdp ................................. False
|
| 77605 |
+
use_dist_ckpt ................................... True
|
| 77606 |
+
use_dist_ckpt_deprecated ........................ False
|
| 77607 |
+
use_distributed_optimizer ....................... False
|
| 77608 |
+
use_flash_attn .................................. False
|
| 77609 |
+
use_legacy_models ............................... False
|
| 77610 |
+
use_mp_args_from_checkpoint_args ................ False
|
| 77611 |
+
use_one_sent_docs ............................... False
|
| 77612 |
+
use_persistent_ckpt_worker ...................... False
|
| 77613 |
+
use_precision_aware_optimizer ................... False
|
| 77614 |
+
use_pytorch_profiler ............................ False
|
| 77615 |
+
use_ring_exchange_p2p ........................... False
|
| 77616 |
+
use_rope_scaling ................................ False
|
| 77617 |
+
use_rotary_position_embeddings .................. False
|
| 77618 |
+
use_sharp ....................................... False
|
| 77619 |
+
use_tokenizer_model_from_checkpoint_args ........ True
|
| 77620 |
+
use_torch_fsdp2 ................................. False
|
| 77621 |
+
use_torch_optimizer_for_cpu_offload ............. False
|
| 77622 |
+
use_tp_pp_dp_mapping ............................ False
|
| 77623 |
+
v_head_dim ...................................... 128
|
| 77624 |
+
valid_data_path ................................. None
|
| 77625 |
+
variable_seq_lengths ............................ False
|
| 77626 |
+
virtual_pipeline_model_parallel_size ............ None
|
| 77627 |
+
vision_backbone_type ............................ vit
|
| 77628 |
+
vision_pretraining .............................. False
|
| 77629 |
+
vision_pretraining_type ......................... classify
|
| 77630 |
+
vocab_extra_ids ................................. 0
|
| 77631 |
+
vocab_file ...................................... vocab.json
|
| 77632 |
+
vocab_size ...................................... None
|
| 77633 |
+
wandb_exp_name ..................................
|
| 77634 |
+
wandb_project ...................................
|
| 77635 |
+
wandb_save_dir ..................................
|
| 77636 |
+
weight_decay .................................... 0.1
|
| 77637 |
+
weight_decay_incr_style ......................... constant
|
| 77638 |
+
wgrad_deferral_limit ............................ 0
|
| 77639 |
+
world_size ...................................... 64
|
| 77640 |
+
yaml_cfg ........................................ None
|
| 77641 |
+
-------------------- end of arguments ---------------------
|
| 77642 |
+
INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1
|
| 77643 |
+
> building GPT2BPETokenizer tokenizer ...
|
| 77644 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77645 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77646 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77647 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77648 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77649 |
+
> padded vocab (size: 50257) with 943 dummy tokens (new size: 51200)
|
| 77650 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77651 |
+
WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED
|
| 77652 |
+
> initializing torch distributed ...
|
| 77653 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77654 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77655 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77656 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77657 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77658 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77659 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77660 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77661 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77662 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77663 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77664 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77665 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77666 |
+
WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written.
|
| 77667 |
+
WARNING: one_logger package is required to enable e2e metrics tracking. please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it
|
| 77668 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77669 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77670 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77671 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77672 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77673 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77674 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77675 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77676 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77677 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77678 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77679 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77680 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77681 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77682 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77683 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77684 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77685 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77686 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77687 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77688 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77689 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77690 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77691 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77692 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77693 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77694 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77695 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77696 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77697 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77698 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77699 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77700 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77701 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77702 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77703 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77704 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77705 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77706 |
+
> initialized tensor model parallel with size 8
|
| 77707 |
+
> initialized pipeline model parallel with size 1
|
| 77708 |
+
> setting random seeds to 1234 ...
|
| 77709 |
+
> compiling dataset index builder ...
|
| 77710 |
+
make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets'
|
| 77711 |
+
INFO:megatron.training.initialize:Setting logging level to 0
|
| 77712 |
+
make: Nothing to be done for 'default'.
|
| 77713 |
+
make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets'
|
| 77714 |
+
>>> done with dataset index builder. Compilation time: 0.042 seconds
|
| 77715 |
+
WARNING: constraints for invoking optimized fused softmax kernel are not met. We default back to unfused kernel invocations.
|
| 77716 |
+
> compiling and loading fused kernels ...
|
| 77717 |
+
>>> done with compiling and loading fused kernels. Compilation time: 4.716 seconds
|
| 77718 |
+
time to initialize megatron (seconds): 11.995
|
| 77719 |
+
[after megatron is initialized] datetime: 2025-06-21 20:28:43
|
| 77720 |
+
building GPT model ...
|
| 77721 |
+
>>> embedding
|
| 77722 |
+
>>> decoder
|
| 77723 |
+
>>> output_layer
|
| 77724 |
+
> number of parameters on (tensor, pipeline) model parallel rank (5, 0): 607188480
|
| 77725 |
+
>>> embedding
|
| 77726 |
+
>>> decoder
|
| 77727 |
+
>>> output_layer
|
| 77728 |
+
> number of parameters on (tensor, pipeline) model parallel rank (7, 0): 607188480
|
| 77729 |
+
>>> embedding
|
| 77730 |
+
>>> decoder
|
| 77731 |
+
>>> output_layer
|
| 77732 |
+
> number of parameters on (tensor, pipeline) model parallel rank (5, 0): 607188480
|
| 77733 |
+
>>> embedding
|
| 77734 |
+
>>> decoder
|
| 77735 |
+
>>> output_layer
|
| 77736 |
+
> number of parameters on (tensor, pipeline) model parallel rank (6, 0): 607188480
|
| 77737 |
+
>>> embedding
|
| 77738 |
+
>>> decoder
|
| 77739 |
+
>>> output_layer
|
| 77740 |
+
> number of parameters on (tensor, pipeline) model parallel rank (3, 0): 607188480
|
| 77741 |
+
>>> embedding
|
| 77742 |
+
>>> decoder
|
| 77743 |
+
>>> output_layer
|
| 77744 |
+
> number of parameters on (tensor, pipeline) model parallel rank (4, 0): 607188480
|
| 77745 |
+
>>> embedding
|
| 77746 |
+
>>> decoder
|
| 77747 |
+
>>> output_layer
|
| 77748 |
+
> number of parameters on (tensor, pipeline) model parallel rank (5, 0): 607188480
|
| 77749 |
+
>>> embedding
|
| 77750 |
+
>>> decoder
|
| 77751 |
+
>>> output_layer
|
| 77752 |
+
> number of parameters on (tensor, pipeline) model parallel rank (4, 0): 607188480
|
| 77753 |
+
>>> embedding
|
| 77754 |
+
>>> decoder
|
| 77755 |
+
>>> output_layer
|
| 77756 |
+
> number of parameters on (tensor, pipeline) model parallel rank (7, 0): 607188480
|
| 77757 |
+
>>> embedding
|
| 77758 |
+
>>> decoder
|
| 77759 |
+
>>> output_layer
|
| 77760 |
+
> number of parameters on (tensor, pipeline) model parallel rank (4, 0): 607188480
|
| 77761 |
+
>>> embedding
|
| 77762 |
+
>>> decoder
|
| 77763 |
+
>>> output_layer
|
| 77764 |
+
> number of parameters on (tensor, pipeline) model parallel rank (3, 0): 607188480
|
| 77765 |
+
>>> embedding
|
| 77766 |
+
>>> decoder
|
| 77767 |
+
>>> output_layer
|
| 77768 |
+
> number of parameters on (tensor, pipeline) model parallel rank (4, 0): 607188480
|
| 77769 |
+
>>> embedding
|
| 77770 |
+
>>> decoder
|
| 77771 |
+
>>> output_layer
|
| 77772 |
+
> number of parameters on (tensor, pipeline) model parallel rank (1, 0): 607188480
|
| 77773 |
+
>>> embedding
|
| 77774 |
+
>>> decoder
|
| 77775 |
+
>>> output_layer
|
| 77776 |
+
> number of parameters on (tensor, pipeline) model parallel rank (2, 0): 607188480
|
| 77777 |
+
>>> embedding
|
| 77778 |
+
>>> decoder
|
| 77779 |
+
>>> output_layer
|
| 77780 |
+
> number of parameters on (tensor, pipeline) model parallel rank (1, 0): 607188480
|
| 77781 |
+
>>> embedding
|
| 77782 |
+
>>> decoder
|
| 77783 |
+
>>> output_layer
|
| 77784 |
+
> number of parameters on (tensor, pipeline) model parallel rank (6, 0): 607188480
|
| 77785 |
+
>>> embedding
|
| 77786 |
+
>>> decoder
|
| 77787 |
+
>>> output_layer
|
| 77788 |
+
> number of parameters on (tensor, pipeline) model parallel rank (0, 0): 607188480
|
| 77789 |
+
>>> embedding
|
| 77790 |
+
>>> decoder
|
| 77791 |
+
>>> output_layer
|
| 77792 |
+
> number of parameters on (tensor, pipeline) model parallel rank (3, 0): 607188480
|
| 77793 |
+
>>> embedding
|
| 77794 |
+
>>> decoder
|
| 77795 |
+
>>> output_layer
|
| 77796 |
+
> number of parameters on (tensor, pipeline) model parallel rank (2, 0): 607188480
|
| 77797 |
+
>>> embedding
|
| 77798 |
+
>>> decoder
|
| 77799 |
+
>>> output_layer
|
| 77800 |
+
>>> embedding
|
| 77801 |
+
>>> decoder
|
| 77802 |
+
>>> output_layer
|
| 77803 |
+
>>> embedding
|
| 77804 |
+
>>> decoder
|
| 77805 |
+
>>> output_layer
|
| 77806 |
+
>>> embedding
|
| 77807 |
+
>>> decoder
|
| 77808 |
+
>>> output_layer
|
| 77809 |
+
> number of parameters on (tensor, pipeline) model parallel rank (0, 0): 607188480
|
| 77810 |
+
> number of parameters on (tensor, pipeline) model parallel rank (3, 0): 607188480
|
| 77811 |
+
> number of parameters on (tensor, pipeline) model parallel rank (4, 0): 607188480
|
| 77812 |
+
>>> embedding
|
| 77813 |
+
>>> decoder
|
| 77814 |
+
>>> output_layer
|
| 77815 |
+
> number of parameters on (tensor, pipeline) model parallel rank (0, 0): 607188480
|
| 77816 |
+
>>> embedding
|
| 77817 |
+
>>> decoder
|
| 77818 |
+
>>> output_layer
|
| 77819 |
+
> number of parameters on (tensor, pipeline) model parallel rank (1, 0): 607188480
|
| 77820 |
+
> number of parameters on (tensor, pipeline) model parallel rank (3, 0): 607188480
|
| 77821 |
+
>>> embedding
|
| 77822 |
+
>>> decoder
|
| 77823 |
+
>>> output_layer
|
| 77824 |
+
>>> embedding
|
| 77825 |
+
>>> decoder
|
| 77826 |
+
>>> output_layer
|
| 77827 |
+
> number of parameters on (tensor, pipeline) model parallel rank (2, 0): 607188480
|
| 77828 |
+
> number of parameters on (tensor, pipeline) model parallel rank (0, 0): 607188480
|
| 77829 |
+
>>> embedding
|
| 77830 |
+
>>> decoder
|
| 77831 |
+
>>> output_layer
|
| 77832 |
+
> number of parameters on (tensor, pipeline) model parallel rank (5, 0): 607188480
|
| 77833 |
+
>>> embedding
|
| 77834 |
+
>>> decoder
|
| 77835 |
+
>>> output_layer
|
| 77836 |
+
> number of parameters on (tensor, pipeline) model parallel rank (6, 0): 607188480
|
| 77837 |
+
>>> embedding
|
| 77838 |
+
>>> decoder
|
| 77839 |
+
>>> output_layer
|
| 77840 |
+
> number of parameters on (tensor, pipeline) model parallel rank (0, 0): 607188480
|
| 77841 |
+
>>> embedding
|
| 77842 |
+
>>> decoder
|
| 77843 |
+
>>> output_layer
|
| 77844 |
+
> number of parameters on (tensor, pipeline) model parallel rank (1, 0): 607188480
|
| 77845 |
+
>>> embedding
|
| 77846 |
+
>>> decoder
|
| 77847 |
+
>>> output_layer
|
| 77848 |
+
> number of parameters on (tensor, pipeline) model parallel rank (1, 0): 607188480
|
| 77849 |
+
>>> embedding
|
| 77850 |
+
>>> decoder
|
| 77851 |
+
>>> output_layer
|
| 77852 |
+
> number of parameters on (tensor, pipeline) model parallel rank (5, 0): 607188480
|
| 77853 |
+
>>> embedding
|
| 77854 |
+
>>> decoder
|
| 77855 |
+
>>> output_layer
|
| 77856 |
+
> number of parameters on (tensor, pipeline) model parallel rank (3, 0): 607188480
|
| 77857 |
+
>>> embedding
|
| 77858 |
+
>>> decoder
|
| 77859 |
+
>>> output_layer
|
| 77860 |
+
> number of parameters on (tensor, pipeline) model parallel rank (6, 0): 607188480
|
| 77861 |
+
>>> embedding
|
| 77862 |
+
>>> decoder
|
| 77863 |
+
>>> output_layer
|
| 77864 |
+
> number of parameters on (tensor, pipeline) model parallel rank (2, 0): 607188480
|
| 77865 |
+
>>> embedding
|
| 77866 |
+
>>> decoder
|
| 77867 |
+
>>> output_layer
|
| 77868 |
+
> number of parameters on (tensor, pipeline) model parallel rank (2, 0): 607188480
|
| 77869 |
+
>>> embedding
|
| 77870 |
+
>>> decoder
|
| 77871 |
+
>>> output_layer
|
| 77872 |
+
> number of parameters on (tensor, pipeline) model parallel rank (7, 0): 607188480
|
| 77873 |
+
>>> embedding
|
| 77874 |
+
>>> decoder
|
| 77875 |
+
>>> output_layer
|
| 77876 |
+
> number of parameters on (tensor, pipeline) model parallel rank (6, 0): 607188480
|
| 77877 |
+
>>> embedding
|
| 77878 |
+
>>> decoder
|
| 77879 |
+
>>> output_layer
|
| 77880 |
+
> number of parameters on (tensor, pipeline) model parallel rank (4, 0): 607188480
|
| 77881 |
+
>>> embedding
|
| 77882 |
+
>>> decoder
|
| 77883 |
+
>>> output_layer
|
| 77884 |
+
> number of parameters on (tensor, pipeline) model parallel rank (7, 0): 607188480
|
| 77885 |
+
>>> embedding
|
| 77886 |
+
>>> decoder
|
| 77887 |
+
>>> output_layer
|
| 77888 |
+
> number of parameters on (tensor, pipeline) model parallel rank (4, 0): 607188480
|
| 77889 |
+
>>> embedding
|
| 77890 |
+
>>> decoder
|
| 77891 |
+
>>> output_layer
|
| 77892 |
+
> number of parameters on (tensor, pipeline) model parallel rank (7, 0): 607188480
|
| 77893 |
+
>>> embedding
|
| 77894 |
+
>>> decoder
|
| 77895 |
+
>>> output_layer
|
| 77896 |
+
> number of parameters on (tensor, pipeline) model parallel rank (3, 0): 607188480
|
| 77897 |
+
>>> embedding
|
| 77898 |
+
>>> decoder
|
| 77899 |
+
>>> output_layer
|
| 77900 |
+
> number of parameters on (tensor, pipeline) model parallel rank (2, 0): 607188480
|
| 77901 |
+
>>> embedding
|
| 77902 |
+
>>> decoder
|
| 77903 |
+
>>> output_layer
|
| 77904 |
+
> number of parameters on (tensor, pipeline) model parallel rank (1, 0): 607188480
|
| 77905 |
+
>>> embedding
|
| 77906 |
+
>>> decoder
|
| 77907 |
+
>>> output_layer
|
| 77908 |
+
> number of parameters on (tensor, pipeline) model parallel rank (1, 0): 607188480
|
| 77909 |
+
>>> embedding
|
| 77910 |
+
>>> decoder
|
| 77911 |
+
>>> output_layer
|
| 77912 |
+
> number of parameters on (tensor, pipeline) model parallel rank (7, 0): 607188480
|
| 77913 |
+
INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False)
|
| 77914 |
+
INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1
|
| 77915 |
+
Params for bucket 1 (607188480 elements, 607188480 padded size):
|
| 77916 |
+
module.decoder.layers.1.mlp.linear_fc1.bias
|
| 77917 |
+
module.decoder.layers.0.mlp.linear_fc2.weight
|
| 77918 |
+
module.decoder.layers.0.mlp.linear_fc1.bias
|
| 77919 |
+
module.decoder.final_layernorm.weight
|
| 77920 |
+
module.decoder.layers.1.self_attention.linear_qkv.weight
|
| 77921 |
+
module.decoder.layers.1.self_attention.linear_proj.weight
|
| 77922 |
+
module.decoder.layers.0.self_attention.linear_qkv.bias
|
| 77923 |
+
module.embedding.position_embeddings.weight
|
| 77924 |
+
module.embedding.word_embeddings.weight
|
| 77925 |
+
module.decoder.layers.1.mlp.linear_fc2.weight
|
| 77926 |
+
module.decoder.layers.1.self_attention.linear_proj.bias
|
| 77927 |
+
module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight
|
| 77928 |
+
module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias
|
| 77929 |
+
module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias
|
| 77930 |
+
module.decoder.layers.0.self_attention.linear_proj.weight
|
| 77931 |
+
module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight
|
| 77932 |
+
module.decoder.layers.1.self_attention.linear_qkv.bias
|
| 77933 |
+
module.decoder.layers.0.mlp.linear_fc2.bias
|
| 77934 |
+
module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight
|
| 77935 |
+
module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias
|
| 77936 |
+
module.decoder.layers.0.self_attention.linear_proj.bias
|
| 77937 |
+
module.decoder.layers.1.mlp.linear_fc1.weight
|
| 77938 |
+
module.decoder.layers.0.mlp.linear_fc1.weight
|
| 77939 |
+
module.decoder.layers.1.mlp.linear_fc2.bias
|
| 77940 |
+
module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight
|
| 77941 |
+
module.decoder.layers.0.self_attention.linear_qkv.weight
|
| 77942 |
+
module.decoder.final_layernorm.bias
|
| 77943 |
+
module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias
|
| 77944 |
+
INFO:megatron.core.optimizer:Setting up optimizer with config OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=<megatron.core.timers.Timers object at 0x145c150c5c40>, config_logger_dir='')
|
| 77945 |
+
INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine
|
| 77946 |
+
>>> embedding
|
| 77947 |
+
>>> decoder
|
| 77948 |
+
>>> output_layer
|
| 77949 |
+
>>> embedding
|
| 77950 |
+
>>> decoder
|
| 77951 |
+
>>> output_layer
|
| 77952 |
+
> number of parameters on (tensor, pipeline) model parallel rank (6, 0): 607188480
|
| 77953 |
+
> number of parameters on (tensor, pipeline) model parallel rank (0, 0): 607188480
|
| 77954 |
+
>>> embedding
|
| 77955 |
+
>>> decoder
|
| 77956 |
+
>>> output_layer
|
| 77957 |
+
> number of parameters on (tensor, pipeline) model parallel rank (7, 0): 607188480
|
| 77958 |
+
>>> embedding
|
| 77959 |
+
>>> decoder
|
| 77960 |
+
>>> output_layer
|
| 77961 |
+
> number of parameters on (tensor, pipeline) model parallel rank (7, 0): 607188480
|
| 77962 |
+
>>> embedding
|
| 77963 |
+
>>> decoder
|
| 77964 |
+
>>> output_layer
|
| 77965 |
+
> number of parameters on (tensor, pipeline) model parallel rank (1, 0): 607188480
|
| 77966 |
+
>>> embedding
|
| 77967 |
+
>>> decoder
|
| 77968 |
+
>>> output_layer
|
| 77969 |
+
> number of parameters on (tensor, pipeline) model parallel rank (0, 0): 607188480
|
| 77970 |
+
>>> embedding
|
| 77971 |
+
>>> decoder
|
| 77972 |
+
>>> output_layer
|
| 77973 |
+
> number of parameters on (tensor, pipeline) model parallel rank (0, 0): 607188480
|
| 77974 |
+
>>> embedding
|
| 77975 |
+
>>> decoder
|
| 77976 |
+
>>> output_layer
|
| 77977 |
+
> number of parameters on (tensor, pipeline) model parallel rank (6, 0): 607188480
|
| 77978 |
+
>>> embedding
|
| 77979 |
+
>>> decoder
|
| 77980 |
+
>>> output_layer
|
| 77981 |
+
> number of parameters on (tensor, pipeline) model parallel rank (6, 0): 607188480
|
| 77982 |
+
>>> embedding
|
| 77983 |
+
>>> decoder
|
| 77984 |
+
>>> output_layer
|
| 77985 |
+
> number of parameters on (tensor, pipeline) model parallel rank (5, 0): 607188480
|
| 77986 |
+
>>> embedding
|
| 77987 |
+
>>> decoder
|
| 77988 |
+
>>> output_layer
|
| 77989 |
+
> number of parameters on (tensor, pipeline) model parallel rank (5, 0): 607188480
|
| 77990 |
+
>>> embedding
|
| 77991 |
+
>>> decoder
|
| 77992 |
+
>>> output_layer
|
| 77993 |
+
> number of parameters on (tensor, pipeline) model parallel rank (5, 0): 607188480
|
| 77994 |
+
>>> embedding
|
| 77995 |
+
>>> decoder
|
| 77996 |
+
>>> output_layer
|
| 77997 |
+
> number of parameters on (tensor, pipeline) model parallel rank (2, 0): 607188480
|
| 77998 |
+
>>> embedding
|
| 77999 |
+
>>> decoder
|
| 78000 |
+
>>> output_layer
|
| 78001 |
+
> number of parameters on (tensor, pipeline) model parallel rank (2, 0): 607188480
|
| 78002 |
+
>>> embedding
|
| 78003 |
+
>>> decoder
|
| 78004 |
+
>>> output_layer
|
| 78005 |
+
> number of parameters on (tensor, pipeline) model parallel rank (4, 0): 607188480
|
| 78006 |
+
>>> embedding
|
| 78007 |
+
>>> decoder
|
| 78008 |
+
>>> output_layer
|
| 78009 |
+
> number of parameters on (tensor, pipeline) model parallel rank (3, 0): 607188480
|
| 78010 |
+
loading distributed checkpoint from gpt-checkpoint at iteration 10
|
attnserver.run_attnserver.slurm.sh.343195.out.log
CHANGED
|
The diff for this file is too large to render.
See raw diff
|
attnserver.run_attnserver.slurm.sh.343201.err.log
CHANGED
|
@@ -6513,3 +6513,24 @@ W0621 20:24:56.856000 2481343 site-packages/torch/distributed/run.py:766] ******
|
|
| 6513 |
warnings.warn(
|
| 6514 |
/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
|
| 6515 |
warnings.warn(
|
| 6516 |
+
[rank0]: Traceback (most recent call last):
|
| 6517 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
|
| 6518 |
+
[rank0]: pretrain(
|
| 6519 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 879, in pretrain
|
| 6520 |
+
[rank0]: save_checkpoint(
|
| 6521 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 469, in save_checkpoint
|
| 6522 |
+
[rank0]: async_save_request = dist_checkpointing.save(state_dict, checkpoint_name, save_strategy,
|
| 6523 |
+
[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 6524 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 386, in save
|
| 6525 |
+
[rank0]: common_strategy.save_common(state_dict, checkpoint_dir)
|
| 6526 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/common.py", line 48, in save_common
|
| 6527 |
+
[rank0]: torch.save(common_state_dict, path)
|
| 6528 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/serialization.py", line 964, in save
|
| 6529 |
+
[rank0]: with _open_zipfile_writer(f) as opened_zipfile:
|
| 6530 |
+
[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^
|
| 6531 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/serialization.py", line 828, in _open_zipfile_writer
|
| 6532 |
+
[rank0]: return container(name_or_buffer)
|
| 6533 |
+
[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 6534 |
+
[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/serialization.py", line 792, in __init__
|
| 6535 |
+
[rank0]: torch._C.PyTorchFileWriter(
|
| 6536 |
+
[rank0]: RuntimeError: Parent directory gpt-checkpoint/iter_0000010 does not exist.
|
attnserver.run_attnserver.slurm.sh.343201.out.log
CHANGED
|
@@ -32912,3 +32912,540 @@ batch tensor after cp: position_ids torch.Size([1, 40960])
|
|
| 32912 |
Start exporting trace 6
|
| 32913 |
Done exporting trace 6
|
| 32914 |
[2025-06-21 20:28:12] iteration 7/ 10 | consumed samples: 7 | elapsed time per iteration (ms): 16736.2 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 67108864.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
|
| 32915 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 32916 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 32917 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 32918 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 32919 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 32920 |
+
batch tensor after cp: tokens torch.Size([1, 40960])
|
| 32921 |
+
batch tensor after cp: labels torch.Size([1, 40960])
|
| 32922 |
+
batch tensor after cp: loss_mask torch.Size([1, 40960])
|
| 32923 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 81920])
|
| 32924 |
+
batch tensor after cp: position_ids torch.Size([1, 40960])
|
| 32925 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 32926 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 32927 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 32928 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 32929 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 32930 |
+
batch tensor after cp: tokens torch.Size([1, 40960])
|
| 32931 |
+
batch tensor after cp: labels torch.Size([1, 40960])
|
| 32932 |
+
batch tensor after cp: loss_mask torch.Size([1, 40960])
|
| 32933 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 81920])
|
| 32934 |
+
batch tensor after cp: position_ids torch.Size([1, 40960])
|
| 32935 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 32936 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 32937 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 32938 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 32939 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 32940 |
+
batch tensor after cp: tokens torch.Size([1, 40960])
|
| 32941 |
+
batch tensor after cp: labels torch.Size([1, 40960])
|
| 32942 |
+
batch tensor after cp: loss_mask torch.Size([1, 40960])
|
| 32943 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 81920])
|
| 32944 |
+
batch tensor after cp: position_ids torch.Size([1, 40960])
|
| 32945 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 32946 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 32947 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 32948 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 32949 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 32950 |
+
batch tensor after cp: tokens torch.Size([1, 40960])
|
| 32951 |
+
batch tensor after cp: labels torch.Size([1, 40960])
|
| 32952 |
+
batch tensor after cp: loss_mask torch.Size([1, 40960])
|
| 32953 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 81920])
|
| 32954 |
+
batch tensor after cp: position_ids torch.Size([1, 40960])
|
| 32955 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 32956 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 32957 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 32958 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 32959 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 32960 |
+
batch tensor after cp: tokens torch.Size([1, 40960])
|
| 32961 |
+
batch tensor after cp: labels torch.Size([1, 40960])
|
| 32962 |
+
batch tensor after cp: loss_mask torch.Size([1, 40960])
|
| 32963 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 81920])
|
| 32964 |
+
batch tensor after cp: position_ids torch.Size([1, 40960])
|
| 32965 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 32966 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 32967 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 32968 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 32969 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 32970 |
+
batch tensor after cp: tokens torch.Size([1, 40960])
|
| 32971 |
+
batch tensor after cp: labels torch.Size([1, 40960])
|
| 32972 |
+
batch tensor after cp: loss_mask torch.Size([1, 40960])
|
| 32973 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 81920])
|
| 32974 |
+
batch tensor after cp: position_ids torch.Size([1, 40960])
|
| 32975 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 32976 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 32977 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 32978 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 32979 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 32980 |
+
batch tensor after cp: tokens torch.Size([1, 40960])
|
| 32981 |
+
batch tensor after cp: labels torch.Size([1, 40960])
|
| 32982 |
+
batch tensor after cp: loss_mask torch.Size([1, 40960])
|
| 32983 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 81920])
|
| 32984 |
+
batch tensor after cp: position_ids torch.Size([1, 40960])
|
| 32985 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 32986 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 32987 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 32988 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 32989 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 32990 |
+
batch tensor after cp: tokens torch.Size([1, 40960])
|
| 32991 |
+
batch tensor after cp: labels torch.Size([1, 40960])
|
| 32992 |
+
batch tensor after cp: loss_mask torch.Size([1, 40960])
|
| 32993 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 81920])
|
| 32994 |
+
batch tensor after cp: position_ids torch.Size([1, 40960])
|
| 32995 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 32996 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 32997 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 32998 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 32999 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 33000 |
+
batch tensor after cp: tokens torch.Size([1, 40960])
|
| 33001 |
+
batch tensor after cp: labels torch.Size([1, 40960])
|
| 33002 |
+
batch tensor after cp: loss_mask torch.Size([1, 40960])
|
| 33003 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 81920])
|
| 33004 |
+
batch tensor after cp: position_ids torch.Size([1, 40960])
|
| 33005 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 33006 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 33007 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 33008 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 33009 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 33010 |
+
batch tensor after cp: tokens torch.Size([1, 40960])
|
| 33011 |
+
batch tensor after cp: labels torch.Size([1, 40960])
|
| 33012 |
+
batch tensor after cp: loss_mask torch.Size([1, 40960])
|
| 33013 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 81920])
|
| 33014 |
+
batch tensor after cp: position_ids torch.Size([1, 40960])
|
| 33015 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 33016 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 33017 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 33018 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 33019 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 33020 |
+
batch tensor after cp: tokens torch.Size([1, 40960])
|
| 33021 |
+
batch tensor after cp: labels torch.Size([1, 40960])
|
| 33022 |
+
batch tensor after cp: loss_mask torch.Size([1, 40960])
|
| 33023 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 81920])
|
| 33024 |
+
batch tensor after cp: position_ids torch.Size([1, 40960])
|
| 33025 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 33026 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 33027 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 33028 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 33029 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 33030 |
+
batch tensor after cp: tokens torch.Size([1, 40960])
|
| 33031 |
+
batch tensor after cp: labels torch.Size([1, 40960])
|
| 33032 |
+
batch tensor after cp: loss_mask torch.Size([1, 40960])
|
| 33033 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 81920])
|
| 33034 |
+
batch tensor after cp: position_ids torch.Size([1, 40960])
|
| 33035 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 33036 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 33037 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 33038 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 33039 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 33040 |
+
batch tensor after cp: tokens torch.Size([1, 40960])
|
| 33041 |
+
batch tensor after cp: labels torch.Size([1, 40960])
|
| 33042 |
+
batch tensor after cp: loss_mask torch.Size([1, 40960])
|
| 33043 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 81920])
|
| 33044 |
+
batch tensor after cp: position_ids torch.Size([1, 40960])
|
| 33045 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 33046 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 33047 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 33048 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 33049 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 33050 |
+
batch tensor after cp: tokens torch.Size([1, 40960])
|
| 33051 |
+
batch tensor after cp: labels torch.Size([1, 40960])
|
| 33052 |
+
batch tensor after cp: loss_mask torch.Size([1, 40960])
|
| 33053 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 81920])
|
| 33054 |
+
batch tensor after cp: position_ids torch.Size([1, 40960])
|
| 33055 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 33056 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 33057 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 33058 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 33059 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 33060 |
+
batch tensor after cp: tokens torch.Size([1, 40960])
|
| 33061 |
+
batch tensor after cp: labels torch.Size([1, 40960])
|
| 33062 |
+
batch tensor after cp: loss_mask torch.Size([1, 40960])
|
| 33063 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 81920])
|
| 33064 |
+
batch tensor after cp: position_ids torch.Size([1, 40960])
|
| 33065 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 33066 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 33067 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 33068 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 33069 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 33070 |
+
batch tensor after cp: tokens torch.Size([1, 40960])
|
| 33071 |
+
batch tensor after cp: labels torch.Size([1, 40960])
|
| 33072 |
+
batch tensor after cp: loss_mask torch.Size([1, 40960])
|
| 33073 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 81920])
|
| 33074 |
+
batch tensor after cp: position_ids torch.Size([1, 40960])
|
| 33075 |
+
Start exporting trace 7
|
| 33076 |
+
Done exporting trace 7
|
| 33077 |
+
[2025-06-21 20:28:29] iteration 8/ 10 | consumed samples: 8 | elapsed time per iteration (ms): 16827.8 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 33554432.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
|
| 33078 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 33079 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 33080 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 33081 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 33082 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 33083 |
+
batch tensor after cp: tokens torch.Size([1, 40960])
|
| 33084 |
+
batch tensor after cp: labels torch.Size([1, 40960])
|
| 33085 |
+
batch tensor after cp: loss_mask torch.Size([1, 40960])
|
| 33086 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 81920])
|
| 33087 |
+
batch tensor after cp: position_ids torch.Size([1, 40960])
|
| 33088 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 33089 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 33090 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 33091 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 33092 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 33093 |
+
batch tensor after cp: tokens torch.Size([1, 40960])
|
| 33094 |
+
batch tensor after cp: labels torch.Size([1, 40960])
|
| 33095 |
+
batch tensor after cp: loss_mask torch.Size([1, 40960])
|
| 33096 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 81920])
|
| 33097 |
+
batch tensor after cp: position_ids torch.Size([1, 40960])
|
| 33098 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 33099 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 33100 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 33101 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 33102 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 33103 |
+
batch tensor after cp: tokens torch.Size([1, 40960])
|
| 33104 |
+
batch tensor after cp: labels torch.Size([1, 40960])
|
| 33105 |
+
batch tensor after cp: loss_mask torch.Size([1, 40960])
|
| 33106 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 81920])
|
| 33107 |
+
batch tensor after cp: position_ids torch.Size([1, 40960])
|
| 33108 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 33109 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 33110 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 33111 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 33112 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 33113 |
+
batch tensor after cp: tokens torch.Size([1, 40960])
|
| 33114 |
+
batch tensor after cp: labels torch.Size([1, 40960])
|
| 33115 |
+
batch tensor after cp: loss_mask torch.Size([1, 40960])
|
| 33116 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 81920])
|
| 33117 |
+
batch tensor after cp: position_ids torch.Size([1, 40960])
|
| 33118 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 33119 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 33120 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 33121 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 33122 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 33123 |
+
batch tensor after cp: tokens torch.Size([1, 40960])
|
| 33124 |
+
batch tensor after cp: labels torch.Size([1, 40960])
|
| 33125 |
+
batch tensor after cp: loss_mask torch.Size([1, 40960])
|
| 33126 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 81920])
|
| 33127 |
+
batch tensor after cp: position_ids torch.Size([1, 40960])
|
| 33128 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 33129 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 33130 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 33131 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 33132 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 33133 |
+
batch tensor after cp: tokens torch.Size([1, 40960])
|
| 33134 |
+
batch tensor after cp: labels torch.Size([1, 40960])
|
| 33135 |
+
batch tensor after cp: loss_mask torch.Size([1, 40960])
|
| 33136 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 81920])
|
| 33137 |
+
batch tensor after cp: position_ids torch.Size([1, 40960])
|
| 33138 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 33139 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 33140 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 33141 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 33142 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 33143 |
+
batch tensor after cp: tokens torch.Size([1, 40960])
|
| 33144 |
+
batch tensor after cp: labels torch.Size([1, 40960])
|
| 33145 |
+
batch tensor after cp: loss_mask torch.Size([1, 40960])
|
| 33146 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 81920])
|
| 33147 |
+
batch tensor after cp: position_ids torch.Size([1, 40960])
|
| 33148 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 33149 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 33150 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 33151 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 33152 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 33153 |
+
batch tensor after cp: tokens torch.Size([1, 40960])
|
| 33154 |
+
batch tensor after cp: labels torch.Size([1, 40960])
|
| 33155 |
+
batch tensor after cp: loss_mask torch.Size([1, 40960])
|
| 33156 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 81920])
|
| 33157 |
+
batch tensor after cp: position_ids torch.Size([1, 40960])
|
| 33158 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 33159 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 33160 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 33161 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 33162 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 33163 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 33164 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 33165 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 33166 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 33167 |
+
batch tensor after cp: tokens torch.Size([1, 40960])
|
| 33168 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 33169 |
+
batch tensor after cp: labels torch.Size([1, 40960])
|
| 33170 |
+
batch tensor after cp: loss_mask torch.Size([1, 40960])
|
| 33171 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 81920])
|
| 33172 |
+
batch tensor after cp: position_ids torch.Size([1, 40960])
|
| 33173 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 33174 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 33175 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 33176 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 33177 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 33178 |
+
batch tensor after cp: tokens torch.Size([1, 40960])
|
| 33179 |
+
batch tensor after cp: labels torch.Size([1, 40960])
|
| 33180 |
+
batch tensor after cp: loss_mask torch.Size([1, 40960])
|
| 33181 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 81920])
|
| 33182 |
+
batch tensor after cp: position_ids torch.Size([1, 40960])
|
| 33183 |
+
batch tensor after cp: tokens torch.Size([1, 40960])
|
| 33184 |
+
batch tensor after cp: labels torch.Size([1, 40960])
|
| 33185 |
+
batch tensor after cp: loss_mask torch.Size([1, 40960])
|
| 33186 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 81920])
|
| 33187 |
+
batch tensor after cp: position_ids torch.Size([1, 40960])
|
| 33188 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 33189 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 33190 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 33191 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 33192 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 33193 |
+
batch tensor after cp: tokens torch.Size([1, 40960])
|
| 33194 |
+
batch tensor after cp: labels torch.Size([1, 40960])
|
| 33195 |
+
batch tensor after cp: loss_mask torch.Size([1, 40960])
|
| 33196 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 81920])
|
| 33197 |
+
batch tensor after cp: position_ids torch.Size([1, 40960])
|
| 33198 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 33199 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 33200 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 33201 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 33202 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 33203 |
+
batch tensor after cp: tokens torch.Size([1, 40960])
|
| 33204 |
+
batch tensor after cp: labels torch.Size([1, 40960])
|
| 33205 |
+
batch tensor after cp: loss_mask torch.Size([1, 40960])
|
| 33206 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 81920])
|
| 33207 |
+
batch tensor after cp: position_ids torch.Size([1, 40960])
|
| 33208 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 33209 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 33210 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 33211 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 33212 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 33213 |
+
batch tensor after cp: tokens torch.Size([1, 40960])
|
| 33214 |
+
batch tensor after cp: labels torch.Size([1, 40960])
|
| 33215 |
+
batch tensor after cp: loss_mask torch.Size([1, 40960])
|
| 33216 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 81920])
|
| 33217 |
+
batch tensor after cp: position_ids torch.Size([1, 40960])
|
| 33218 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 33219 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 33220 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 33221 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 33222 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 33223 |
+
batch tensor after cp: tokens torch.Size([1, 40960])
|
| 33224 |
+
batch tensor after cp: labels torch.Size([1, 40960])
|
| 33225 |
+
batch tensor after cp: loss_mask torch.Size([1, 40960])
|
| 33226 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 81920])
|
| 33227 |
+
batch tensor after cp: position_ids torch.Size([1, 40960])
|
| 33228 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 33229 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 33230 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 33231 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 33232 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 33233 |
+
batch tensor after cp: tokens torch.Size([1, 40960])
|
| 33234 |
+
batch tensor after cp: labels torch.Size([1, 40960])
|
| 33235 |
+
batch tensor after cp: loss_mask torch.Size([1, 40960])
|
| 33236 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 81920])
|
| 33237 |
+
batch tensor after cp: position_ids torch.Size([1, 40960])
|
| 33238 |
+
Start exporting trace 8
|
| 33239 |
+
Done exporting trace 8
|
| 33240 |
+
[2025-06-21 20:28:46] iteration 9/ 10 | consumed samples: 9 | elapsed time per iteration (ms): 16795.8 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 16777216.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
|
| 33241 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 33242 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 33243 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 33244 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 33245 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 33246 |
+
batch tensor after cp: tokens torch.Size([1, 40960])
|
| 33247 |
+
batch tensor after cp: labels torch.Size([1, 40960])
|
| 33248 |
+
batch tensor after cp: loss_mask torch.Size([1, 40960])
|
| 33249 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 81920])
|
| 33250 |
+
batch tensor after cp: position_ids torch.Size([1, 40960])
|
| 33251 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 33252 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 33253 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 33254 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 33255 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 33256 |
+
batch tensor after cp: tokens torch.Size([1, 40960])
|
| 33257 |
+
batch tensor after cp: labels torch.Size([1, 40960])
|
| 33258 |
+
batch tensor after cp: loss_mask torch.Size([1, 40960])
|
| 33259 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 81920])
|
| 33260 |
+
batch tensor after cp: position_ids torch.Size([1, 40960])
|
| 33261 |
+
batch tensor: tokens torch.Size([1, 81920])
|
| 33262 |
+
batch tensor: labels torch.Size([1, 81920])
|
| 33263 |
+
batch tensor: loss_mask torch.Size([1, 81920])
|
| 33264 |
+
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
|
| 33265 |
+
batch tensor: position_ids torch.Size([1, 81920])
|
| 33266 |
+
batch tensor after cp: tokens torch.Size([1, 40960])
|
| 33267 |
+
batch tensor after cp: labels torch.Size([1, 40960])
|
| 33268 |
+
batch tensor after cp: loss_mask torch.Size([1, 40960])
|
| 33269 |
+
batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 81920])
|
| 33270 |
+
batch tensor after cp: position_ids torch.Size([1, 40960])
|
| 33271 |
+
batch tensor: tokens torch.Size([1, 81920])
batch tensor: labels torch.Size([1, 81920])
batch tensor: loss_mask torch.Size([1, 81920])
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
batch tensor: position_ids torch.Size([1, 81920])
batch tensor after cp: tokens torch.Size([1, 40960])
batch tensor after cp: labels torch.Size([1, 40960])
batch tensor after cp: loss_mask torch.Size([1, 40960])
batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 81920])
batch tensor after cp: position_ids torch.Size([1, 40960])
batch tensor: tokens torch.Size([1, 81920])
batch tensor: labels torch.Size([1, 81920])
batch tensor: loss_mask torch.Size([1, 81920])
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
batch tensor: position_ids torch.Size([1, 81920])
batch tensor after cp: tokens torch.Size([1, 40960])
batch tensor after cp: labels torch.Size([1, 40960])
batch tensor after cp: loss_mask torch.Size([1, 40960])
batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 81920])
batch tensor after cp: position_ids torch.Size([1, 40960])
batch tensor: tokens torch.Size([1, 81920])
batch tensor: labels torch.Size([1, 81920])
batch tensor: loss_mask torch.Size([1, 81920])
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
batch tensor: position_ids torch.Size([1, 81920])
batch tensor after cp: tokens torch.Size([1, 40960])
batch tensor after cp: labels torch.Size([1, 40960])
batch tensor after cp: loss_mask torch.Size([1, 40960])
batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 81920])
batch tensor after cp: position_ids torch.Size([1, 40960])
batch tensor: tokens torch.Size([1, 81920])
batch tensor: labels torch.Size([1, 81920])
batch tensor: loss_mask torch.Size([1, 81920])
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
batch tensor: position_ids torch.Size([1, 81920])
batch tensor after cp: tokens torch.Size([1, 40960])
batch tensor after cp: labels torch.Size([1, 40960])
batch tensor after cp: loss_mask torch.Size([1, 40960])
batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 81920])
batch tensor after cp: position_ids torch.Size([1, 40960])
batch tensor: tokens torch.Size([1, 81920])
batch tensor: labels torch.Size([1, 81920])
batch tensor: loss_mask torch.Size([1, 81920])
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
batch tensor: position_ids torch.Size([1, 81920])
batch tensor after cp: tokens torch.Size([1, 40960])
batch tensor after cp: labels torch.Size([1, 40960])
batch tensor after cp: loss_mask torch.Size([1, 40960])
batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 81920])
batch tensor after cp: position_ids torch.Size([1, 40960])
batch tensor: tokens torch.Size([1, 81920])
batch tensor: labels torch.Size([1, 81920])
batch tensor: loss_mask torch.Size([1, 81920])
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
batch tensor: position_ids torch.Size([1, 81920])
batch tensor after cp: tokens torch.Size([1, 40960])
batch tensor after cp: labels torch.Size([1, 40960])
batch tensor after cp: loss_mask torch.Size([1, 40960])
batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 81920])
batch tensor after cp: position_ids torch.Size([1, 40960])
batch tensor: tokens torch.Size([1, 81920])
batch tensor: labels torch.Size([1, 81920])
batch tensor: loss_mask torch.Size([1, 81920])
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
batch tensor: position_ids torch.Size([1, 81920])
batch tensor after cp: tokens torch.Size([1, 40960])
batch tensor after cp: labels torch.Size([1, 40960])
batch tensor after cp: loss_mask torch.Size([1, 40960])
batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 81920])
batch tensor after cp: position_ids torch.Size([1, 40960])
batch tensor: tokens torch.Size([1, 81920])
batch tensor: labels torch.Size([1, 81920])
batch tensor: loss_mask torch.Size([1, 81920])
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
batch tensor: position_ids torch.Size([1, 81920])
batch tensor after cp: tokens torch.Size([1, 40960])
batch tensor after cp: labels torch.Size([1, 40960])
batch tensor after cp: loss_mask torch.Size([1, 40960])
batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 81920])
batch tensor after cp: position_ids torch.Size([1, 40960])
batch tensor: tokens torch.Size([1, 81920])
batch tensor: labels torch.Size([1, 81920])
batch tensor: loss_mask torch.Size([1, 81920])
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
batch tensor: position_ids torch.Size([1, 81920])
batch tensor after cp: tokens torch.Size([1, 40960])
batch tensor after cp: labels torch.Size([1, 40960])
batch tensor after cp: loss_mask torch.Size([1, 40960])
batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 81920])
batch tensor after cp: position_ids torch.Size([1, 40960])
batch tensor: tokens torch.Size([1, 81920])
batch tensor: labels torch.Size([1, 81920])
batch tensor: loss_mask torch.Size([1, 81920])
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
batch tensor: position_ids torch.Size([1, 81920])
batch tensor after cp: tokens torch.Size([1, 40960])
batch tensor after cp: labels torch.Size([1, 40960])
batch tensor after cp: loss_mask torch.Size([1, 40960])
batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 81920])
batch tensor after cp: position_ids torch.Size([1, 40960])
batch tensor: tokens torch.Size([1, 81920])
batch tensor: labels torch.Size([1, 81920])
batch tensor: loss_mask torch.Size([1, 81920])
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
batch tensor: position_ids torch.Size([1, 81920])
batch tensor after cp: tokens torch.Size([1, 40960])
batch tensor after cp: labels torch.Size([1, 40960])
batch tensor after cp: loss_mask torch.Size([1, 40960])
batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 81920])
batch tensor after cp: position_ids torch.Size([1, 40960])
batch tensor: tokens torch.Size([1, 81920])
batch tensor: labels torch.Size([1, 81920])
batch tensor: loss_mask torch.Size([1, 81920])
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
batch tensor: position_ids torch.Size([1, 81920])
batch tensor after cp: tokens torch.Size([1, 40960])
batch tensor after cp: labels torch.Size([1, 40960])
batch tensor after cp: loss_mask torch.Size([1, 40960])
batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 81920])
batch tensor after cp: position_ids torch.Size([1, 40960])
batch tensor: tokens torch.Size([1, 81920])
batch tensor: labels torch.Size([1, 81920])
batch tensor: loss_mask torch.Size([1, 81920])
batch tensor: attention_mask torch.Size([1, 1, 81920, 81920])
batch tensor: position_ids torch.Size([1, 81920])
batch tensor after cp: tokens torch.Size([1, 40960])
batch tensor after cp: labels torch.Size([1, 40960])
batch tensor after cp: loss_mask torch.Size([1, 40960])
batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 81920])
batch tensor after cp: position_ids torch.Size([1, 40960])
Start exporting trace 9
Done exporting trace 9
[2025-06-21 20:29:03] iteration 10/ 10 | consumed samples: 10 | elapsed time per iteration (ms): 16912.9 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 8388608.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
[after training is done] datetime: 2025-06-21 20:29:03
saving checkpoint at iteration 10 to gpt-checkpoint in torch_dist format
DEBUG:megatron.training.checkpointing:rank: 6, takes 0.09083819389343262 to prepare state dict for ckpt
DEBUG:megatron.training.checkpointing:rank: 7, takes 0.09198808670043945 to prepare state dict for ckpt
DEBUG:megatron.training.checkpointing:rank: 5, takes 0.09203386306762695 to prepare state dict for ckpt
DEBUG:megatron.training.checkpointing:rank: 4, takes 0.09206128120422363 to prepare state dict for ckpt
DEBUG:megatron.training.checkpointing:rank: 2, takes 0.09206986427307129 to prepare state dict for ckpt
DEBUG:megatron.training.checkpointing:rank: 3, takes 0.09206938743591309 to prepare state dict for ckpt
DEBUG:megatron.training.checkpointing:rank: 1, takes 0.09209275245666504 to prepare state dict for ckpt
DEBUG:megatron.training.checkpointing:rank: 0, takes 0.09324049949645996 to prepare state dict for ckpt
DEBUG:megatron.training.checkpointing:rank: 15, takes 0.09270429611206055 to prepare state dict for ckpt
DEBUG:megatron.training.checkpointing:rank: 11, takes 0.09274792671203613 to prepare state dict for ckpt
DEBUG:megatron.training.checkpointing:rank: 9, takes 0.0927731990814209 to prepare state dict for ckpt
DEBUG:megatron.training.checkpointing:rank: 12, takes 0.09279537200927734 to prepare state dict for ckpt
DEBUG:megatron.training.checkpointing:rank: 13, takes 0.09290647506713867 to prepare state dict for ckpt
DEBUG:megatron.training.checkpointing:rank: 10, takes 0.09294629096984863 to prepare state dict for ckpt
DEBUG:megatron.training.checkpointing:rank: 8, takes 0.09694695472717285 to prepare state dict for ckpt
DEBUG:megatron.training.checkpointing:rank: 14, takes 0.09971809387207031 to prepare state dict for ckpt
WARNING:megatron.core.dist_checkpointing.serialization:Overwriting old incomplete / corrupted checkpoint...
DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization
DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization
DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization
DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization
DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization
DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization
DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization
DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization
DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization
DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization
DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization
DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization
DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization
DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization
DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization
DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(209748992), 0), (np.int64(211812352), 1)]
DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(209748992), 0), (np.int64(211812352), 1)]
DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(209748992), 0), (np.int64(211812352), 1)]
DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(209748992), 0), (np.int64(211812352), 1)]
DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(209748992), 0), (np.int64(211812352), 1)]
DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(209748992), 0), (np.int64(211812352), 1)]
DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(209748992), 0), (np.int64(211812352), 1)]
DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(209748992), 0), (np.int64(211812352), 1)]
DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(209748992), 0), (np.int64(211812352), 1)]
DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(209748992), 0), (np.int64(211812352), 1)]
DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(209748992), 0), (np.int64(211812352), 1)]
DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(209748992), 0), (np.int64(211812352), 1)]
DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(209748992), 0), (np.int64(211812352), 1)]
DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(209748992), 0), (np.int64(211812352), 1)]