Add files using upload-large-folder tool
- scripts/run_1.14G_dp16_tp16_pp1_acc16_mbs8_seq2048_zero1_tpmodeALL_vocab32k.sh +68 -0
- scripts/run_1.14G_dp16_tp1_pp2_acc1_mbs2_seq32768_zero1_tpmodeRED_vocab32k.sh +68 -0
- scripts/run_1.14G_dp16_tp32_pp1_acc1_mbs32_seq8192_zero1_tpmodeALL_vocab32k.sh +68 -0
- scripts/run_1.14G_dp16_tp8_pp1_acc1_mbs8_seq8192_zero1_tpmodeALL_vocab32k.sh +68 -0
- scripts/run_1.14G_dp2_tp32_pp1_acc32_mbs2_seq8192_zero1_tpmodeRED_vocab32k.sh +68 -0
- scripts/run_1.14G_dp2_tp32_pp1_acc8_mbs32_seq2048_zero1_tpmodeALL_vocab32k.sh +68 -0
- scripts/run_1.14G_dp2_tp64_pp1_acc2_mbs32_seq8192_zero1_tpmodeALL_vocab32k.sh +68 -0
- scripts/run_1.14G_dp2_tp8_pp1_acc4_mbs256_seq2048_zero1_tpmodeRED_vocab32k.sh +68 -0
- scripts/run_1.14G_dp32_tp16_pp1_acc1_mbs1_seq2048_zero0_tpmodeRED_l16_h2048_heads32.sh +68 -0
- scripts/run_1.14G_dp32_tp4_pp1_acc8_mbs2_seq2048_zero1_tpmodeALL_vocab32k.sh +68 -0
- scripts/run_1.14G_dp4_tp128_pp1_acc4_mbs32_seq2048_zero1_tpmodeRED_vocab32k.sh +68 -0
- scripts/run_1.14G_dp4_tp16_pp1_acc128_mbs4_seq2048_zero1_tpmodeALL_vocab32k.sh +68 -0
- scripts/run_1.14G_dp4_tp2_pp1_acc2_mbs256_seq2048_zero1_tpmodeALL_vocab32k.sh +68 -0
- scripts/run_1.14G_dp4_tp2_pp1_acc32_mbs4_seq2048_zero1_tpmodeRED_vocab32k.sh +68 -0
- scripts/run_1.14G_dp4_tp4_pp1_acc8_mbs4_seq32768_zero1_tpmodeALL_vocab32k.sh +68 -0
- scripts/run_1.14G_dp8_tp1_pp1_acc4_mbs64_seq2048_zero1_tpmodeRED_vocab32k.sh +68 -0
- scripts/run_1.14G_dp8_tp32_pp1_acc64_mbs1_seq8192_zero1_tpmodeALL_vocab32k.sh +68 -0
- scripts/run_1.34G_dp16_tp16_pp1_acc8_mbs1_seq32768_zero1_tpmodeALL_vocab131k.sh +68 -0
- scripts/run_1.34G_dp16_tp4_pp1_acc2_mbs1_seq32768_zero1_tpmodeALL_vocab131k.sh +68 -0
- scripts/run_1.34G_dp16_tp8_pp1_acc32_mbs4_seq2048_zero1_tpmodeRED_vocab131k.sh +68 -0
- scripts/run_1.34G_dp16_tp8_pp1_acc4_mbs8_seq2048_zero1_tpmodeALL_vocab131k.sh +68 -0
- scripts/run_1.34G_dp1_tp4_pp2_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k.sh +73 -0
- scripts/run_1.34G_dp256_tp1_pp2_acc8_mbs1_seq2048_zero1_tpmodeRED_vocab131k.sh +68 -0
- scripts/run_1.34G_dp256_tp2_pp1_acc1_mbs2_seq8192_zero1_tpmodeRED_vocab131k.sh +68 -0
- scripts/run_1.34G_dp2_tp128_pp1_acc256_mbs1_seq8192_zero1_tpmodeALL_vocab131k.sh +68 -0
- scripts/run_1.34G_dp2_tp256_pp1_acc64_mbs1_seq8192_zero1_tpmodeALL_vocab131k.sh +68 -0
- scripts/run_1.34G_dp2_tp32_pp1_acc1_mbs64_seq8192_zero1_tpmodeALL_vocab131k.sh +68 -0
- scripts/run_1.34G_dp2_tp32_pp1_acc2_mbs8_seq32768_zero1_tpmodeRED_vocab131k.sh +68 -0
- scripts/run_1.34G_dp2_tp32_pp8_acc8_mbs16_seq4096_zero1_tpmodeRED_vocab131k.sh +159 -0
- scripts/run_1.34G_dp2_tp4_pp1_acc32_mbs32_seq2048_zero1_tpmodeRED_vocab131k.sh +68 -0
- scripts/run_1.34G_dp2_tp64_pp1_acc2_mbs32_seq32768_zero1_tpmodeRED_vocab131k.sh +68 -0
- scripts/run_1.34G_dp2_tp8_pp1_acc256_mbs1_seq2048_zero1_tpmodeRED_vocab131k.sh +68 -0
- scripts/run_1.34G_dp32_tp2_pp1_acc1_mbs4_seq32768_zero1_tpmodeALL_vocab131k.sh +68 -0
- scripts/run_1.34G_dp32_tp2_pp1_acc2_mbs32_seq2048_zero1_tpmodeRED_vocab131k.sh +68 -0
- scripts/run_1.34G_dp32_tp2_pp1_acc4_mbs1_seq32768_zero1_tpmodeALL_vocab131k.sh +68 -0
- scripts/run_1.34G_dp32_tp4_pp1_acc2_mbs2_seq8192_zero1_tpmodeALL_vocab131k.sh +68 -0
- scripts/run_1.34G_dp4_tp128_pp1_acc256_mbs2_seq2048_zero1_tpmodeRED_vocab131k.sh +68 -0
- scripts/run_1.34G_dp4_tp128_pp1_acc4_mbs8_seq8192_zero1_tpmodeRED_vocab131k.sh +68 -0
- scripts/run_1.34G_dp4_tp128_pp1_acc8_mbs1_seq32768_zero1_tpmodeALL_vocab131k.sh +68 -0
- scripts/run_1.34G_dp4_tp16_pp1_acc2_mbs256_seq2048_zero1_tpmodeALL_vocab131k.sh +68 -0
- scripts/run_1.34G_dp4_tp16_pp1_acc32_mbs1_seq32768_zero1_tpmodeALL_vocab131k.sh +68 -0
- scripts/run_1.34G_dp4_tp1_pp2_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh +159 -0
- scripts/run_1.34G_dp4_tp1_pp2_acc8_mbs64_seq2048_zero1_tpmodeRED_vocab131k.sh +68 -0
- scripts/run_1.34G_dp4_tp2_pp1_acc2_mbs16_seq8192_zero1_tpmodeRED_vocab131k.sh +68 -0
- scripts/run_1.34G_dp4_tp32_pp1_acc128_mbs1_seq2048_zero1_tpmodeRED_vocab131k.sh +68 -0
- scripts/run_1.34G_dp4_tp4_pp2_acc64_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh +159 -0
- scripts/run_1.34G_dp4_tp8_pp1_acc1_mbs128_seq8192_zero1_tpmodeALL_vocab131k.sh +68 -0
- scripts/run_1.34G_dp8_tp16_pp1_acc32_mbs2_seq8192_zero1_tpmodeALL_vocab131k.sh +68 -0
- scripts/run_1.34G_dp8_tp1_pp2_acc16_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh +159 -0
- scripts/run_1.34G_dp8_tp1_pp2_acc4_mbs16_seq2048_zero1_tpmodeRED_vocab131k.sh +68 -0
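Each filename above encodes one benchmarked parallelism layout. Reading the fields in the conventional way (dp = data-parallel degree, tp = tensor-parallel degree, pp = pipeline-parallel degree, acc = gradient-accumulation steps, mbs = micro-batch size, seq = sequence length), the GPU count a script needs is dp × tp × pp, which is what drives the --nodes value in each script below. The following is a minimal Bash sketch assuming that reading of the naming convention; the helper and its variable names are illustrative and are not part of this diff.

#!/bin/bash
# Hypothetical helper (not part of the diff): decode one benchmark name from the
# list above and report the world size it implies. Field meanings are assumed:
# dp = data parallel, tp = tensor parallel, pp = pipeline parallel,
# acc = gradient accumulation steps, mbs = micro batch size, seq = sequence length.
name="1.14G_dp16_tp16_pp1_acc16_mbs8_seq2048_zero1_tpmodeALL_vocab32k"

if [[ $name =~ dp([0-9]+)_tp([0-9]+)_pp([0-9]+)_acc([0-9]+)_mbs([0-9]+)_seq([0-9]+) ]]; then
    dp=${BASH_REMATCH[1]}; tp=${BASH_REMATCH[2]}; pp=${BASH_REMATCH[3]}
    acc=${BASH_REMATCH[4]}; mbs=${BASH_REMATCH[5]}; seq=${BASH_REMATCH[6]}
    world_size=$((dp * tp * pp))        # GPUs this configuration needs
    nodes=$(( (world_size + 7) / 8 ))   # the scripts allocate 8 GPUs per node
    tokens=$((dp * acc * mbs * seq))    # tokens processed per optimizer step
    echo "GPUs: $world_size  nodes: $nodes  tokens/step: $tokens"
fi

For the example name above this yields 256 GPUs and 32 nodes, which matches the --nodes=32 in the first script of this diff.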
scripts/run_1.14G_dp16_tp16_pp1_acc16_mbs8_seq2048_zero1_tpmodeALL_vocab32k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.14G_dp16_tp16_pp1_acc16_mbs8_seq2048_zero1_tpmodeALL_vocab32k # Job name
+#SBATCH --time=00:02:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=32 # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1 # Number of tasks per node
+#SBATCH --cpus-per-task=60 # CPU cores per task
+#SBATCH --gres=gpu:8 # Number of GPUs per node
+#SBATCH --exclusive # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.14G_dp16_tp16_pp1_acc16_mbs8_seq2048_zero1_tpmodeALL_vocab32k.yaml
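The script above (like the rest of this diff) is a standard Slurm batch script, so a benchmark run would typically be submitted with sbatch from the nanotron repository root, so that run_train.py and benchmark/configs/ resolve as relative paths. The invocation below is an assumed usage sketch, not something stated in this diff.

# Assumed usage, from the repository root:
sbatch scripts/run_1.14G_dp16_tp16_pp1_acc16_mbs8_seq2048_zero1_tpmodeALL_vocab32k.sh

# Follow the job log (path pattern from the "#SBATCH -o" line; %j = job id, %x = job name):
# tail -f /fsx/nouamane/projects/nanotron/logs/<jobid>-<jobname>.out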
scripts/run_1.14G_dp16_tp1_pp2_acc1_mbs2_seq32768_zero1_tpmodeRED_vocab32k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.14G_dp16_tp1_pp2_acc1_mbs2_seq32768_zero1_tpmodeRED_vocab32k # Job name
+#SBATCH --time=00:02:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=4 # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1 # Number of tasks per node
+#SBATCH --cpus-per-task=60 # CPU cores per task
+#SBATCH --gres=gpu:8 # Number of GPUs per node
+#SBATCH --exclusive # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.14G_dp16_tp1_pp2_acc1_mbs2_seq32768_zero1_tpmodeRED_vocab32k.yaml
scripts/run_1.14G_dp16_tp32_pp1_acc1_mbs32_seq8192_zero1_tpmodeALL_vocab32k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.14G_dp16_tp32_pp1_acc1_mbs32_seq8192_zero1_tpmodeALL_vocab32k # Job name
+#SBATCH --time=00:02:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=64 # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1 # Number of tasks per node
+#SBATCH --cpus-per-task=60 # CPU cores per task
+#SBATCH --gres=gpu:8 # Number of GPUs per node
+#SBATCH --exclusive # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.14G_dp16_tp32_pp1_acc1_mbs32_seq8192_zero1_tpmodeALL_vocab32k.yaml
scripts/run_1.14G_dp16_tp8_pp1_acc1_mbs8_seq8192_zero1_tpmodeALL_vocab32k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.14G_dp16_tp8_pp1_acc1_mbs8_seq8192_zero1_tpmodeALL_vocab32k # Job name
+#SBATCH --time=00:02:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=16 # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1 # Number of tasks per node
+#SBATCH --cpus-per-task=60 # CPU cores per task
+#SBATCH --gres=gpu:8 # Number of GPUs per node
+#SBATCH --exclusive # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.14G_dp16_tp8_pp1_acc1_mbs8_seq8192_zero1_tpmodeALL_vocab32k.yaml
scripts/run_1.14G_dp2_tp32_pp1_acc32_mbs2_seq8192_zero1_tpmodeRED_vocab32k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.14G_dp2_tp32_pp1_acc32_mbs2_seq8192_zero1_tpmodeRED_vocab32k # Job name
+#SBATCH --time=00:02:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=8 # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1 # Number of tasks per node
+#SBATCH --cpus-per-task=60 # CPU cores per task
+#SBATCH --gres=gpu:8 # Number of GPUs per node
+#SBATCH --exclusive # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.14G_dp2_tp32_pp1_acc32_mbs2_seq8192_zero1_tpmodeRED_vocab32k.yaml
scripts/run_1.14G_dp2_tp32_pp1_acc8_mbs32_seq2048_zero1_tpmodeALL_vocab32k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.14G_dp2_tp32_pp1_acc8_mbs32_seq2048_zero1_tpmodeALL_vocab32k # Job name
+#SBATCH --time=00:02:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=8 # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1 # Number of tasks per node
+#SBATCH --cpus-per-task=60 # CPU cores per task
+#SBATCH --gres=gpu:8 # Number of GPUs per node
+#SBATCH --exclusive # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.14G_dp2_tp32_pp1_acc8_mbs32_seq2048_zero1_tpmodeALL_vocab32k.yaml
scripts/run_1.14G_dp2_tp64_pp1_acc2_mbs32_seq8192_zero1_tpmodeALL_vocab32k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.14G_dp2_tp64_pp1_acc2_mbs32_seq8192_zero1_tpmodeALL_vocab32k # Job name
+#SBATCH --time=00:02:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=16 # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1 # Number of tasks per node
+#SBATCH --cpus-per-task=60 # CPU cores per task
+#SBATCH --gres=gpu:8 # Number of GPUs per node
+#SBATCH --exclusive # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.14G_dp2_tp64_pp1_acc2_mbs32_seq8192_zero1_tpmodeALL_vocab32k.yaml
scripts/run_1.14G_dp2_tp8_pp1_acc4_mbs256_seq2048_zero1_tpmodeRED_vocab32k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.14G_dp2_tp8_pp1_acc4_mbs256_seq2048_zero1_tpmodeRED_vocab32k # Job name
+#SBATCH --time=00:02:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=2 # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1 # Number of tasks per node
+#SBATCH --cpus-per-task=60 # CPU cores per task
+#SBATCH --gres=gpu:8 # Number of GPUs per node
+#SBATCH --exclusive # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.14G_dp2_tp8_pp1_acc4_mbs256_seq2048_zero1_tpmodeRED_vocab32k.yaml
scripts/run_1.14G_dp32_tp16_pp1_acc1_mbs1_seq2048_zero0_tpmodeRED_l16_h2048_heads32.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.14G_dp32_tp16_pp1_acc1_mbs1_seq2048_zero0_tpmodeRED_l16_h2048_heads32 # Job name
+#SBATCH --time=00:15:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=64 # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1 # Number of tasks per node
+#SBATCH --cpus-per-task=60 # CPU cores per task
+#SBATCH --gres=gpu:8 # Number of GPUs per node
+#SBATCH --exclusive # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.14G_dp32_tp16_pp1_acc1_mbs1_seq2048_zero0_tpmodeRED_l16_h2048_heads32.yaml
scripts/run_1.14G_dp32_tp4_pp1_acc8_mbs2_seq2048_zero1_tpmodeALL_vocab32k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.14G_dp32_tp4_pp1_acc8_mbs2_seq2048_zero1_tpmodeALL_vocab32k # Job name
+#SBATCH --time=00:02:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=16 # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1 # Number of tasks per node
+#SBATCH --cpus-per-task=60 # CPU cores per task
+#SBATCH --gres=gpu:8 # Number of GPUs per node
+#SBATCH --exclusive # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.14G_dp32_tp4_pp1_acc8_mbs2_seq2048_zero1_tpmodeALL_vocab32k.yaml
scripts/run_1.14G_dp4_tp128_pp1_acc4_mbs32_seq2048_zero1_tpmodeRED_vocab32k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.14G_dp4_tp128_pp1_acc4_mbs32_seq2048_zero1_tpmodeRED_vocab32k # Job name
+#SBATCH --time=00:02:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=64 # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1 # Number of tasks per node
+#SBATCH --cpus-per-task=60 # CPU cores per task
+#SBATCH --gres=gpu:8 # Number of GPUs per node
+#SBATCH --exclusive # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.14G_dp4_tp128_pp1_acc4_mbs32_seq2048_zero1_tpmodeRED_vocab32k.yaml
scripts/run_1.14G_dp4_tp16_pp1_acc128_mbs4_seq2048_zero1_tpmodeALL_vocab32k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.14G_dp4_tp16_pp1_acc128_mbs4_seq2048_zero1_tpmodeALL_vocab32k # Job name
+#SBATCH --time=00:02:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=8 # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1 # Number of tasks per node
+#SBATCH --cpus-per-task=60 # CPU cores per task
+#SBATCH --gres=gpu:8 # Number of GPUs per node
+#SBATCH --exclusive # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.14G_dp4_tp16_pp1_acc128_mbs4_seq2048_zero1_tpmodeALL_vocab32k.yaml
scripts/run_1.14G_dp4_tp2_pp1_acc2_mbs256_seq2048_zero1_tpmodeALL_vocab32k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.14G_dp4_tp2_pp1_acc2_mbs256_seq2048_zero1_tpmodeALL_vocab32k # Job name
+#SBATCH --time=00:02:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=1 # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1 # Number of tasks per node
+#SBATCH --cpus-per-task=60 # CPU cores per task
+#SBATCH --gres=gpu:8 # Number of GPUs per node
+#SBATCH --exclusive # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.14G_dp4_tp2_pp1_acc2_mbs256_seq2048_zero1_tpmodeALL_vocab32k.yaml
scripts/run_1.14G_dp4_tp2_pp1_acc32_mbs4_seq2048_zero1_tpmodeRED_vocab32k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.14G_dp4_tp2_pp1_acc32_mbs4_seq2048_zero1_tpmodeRED_vocab32k # Job name
+#SBATCH --time=00:02:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=1 # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1 # Number of tasks per node
+#SBATCH --cpus-per-task=60 # CPU cores per task
+#SBATCH --gres=gpu:8 # Number of GPUs per node
+#SBATCH --exclusive # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.14G_dp4_tp2_pp1_acc32_mbs4_seq2048_zero1_tpmodeRED_vocab32k.yaml
scripts/run_1.14G_dp4_tp4_pp1_acc8_mbs4_seq32768_zero1_tpmodeALL_vocab32k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.14G_dp4_tp4_pp1_acc8_mbs4_seq32768_zero1_tpmodeALL_vocab32k # Job name
+#SBATCH --time=00:02:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=2 # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1 # Number of tasks per node
+#SBATCH --cpus-per-task=60 # CPU cores per task
+#SBATCH --gres=gpu:8 # Number of GPUs per node
+#SBATCH --exclusive # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.14G_dp4_tp4_pp1_acc8_mbs4_seq32768_zero1_tpmodeALL_vocab32k.yaml
scripts/run_1.14G_dp8_tp1_pp1_acc4_mbs64_seq2048_zero1_tpmodeRED_vocab32k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.14G_dp8_tp1_pp1_acc4_mbs64_seq2048_zero1_tpmodeRED_vocab32k  # Job name
+#SBATCH --time=00:02:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=1  # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1  # Number of tasks per node
+#SBATCH --cpus-per-task=60  # CPU cores per task
+#SBATCH --gres=gpu:8  # Number of GPUs per node
+#SBATCH --exclusive  # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.14G_dp8_tp1_pp1_acc4_mbs64_seq2048_zero1_tpmodeRED_vocab32k.yaml
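The `--nodes` value in each of these generated launchers follows from the parallelism sizes encoded in the file name: with 8 GPUs per node (every script sets `GPUS_PER_NODE=8`), a `dpD_tpT_ppP` config needs D·T·P/8 nodes. A minimal sketch of that bookkeeping, using a hypothetical helper `nodes_for` (not part of the scripts):

# Sketch only: reproduce the node counts requested by the scripts above/below.
# Assumes 8 GPUs per node, matching GPUS_PER_NODE=8 in every script.
nodes_for() {
    local dp=$1 tp=$2 pp=$3
    echo $(( dp * tp * pp / 8 ))
}

nodes_for 8 1 1    # -> 1   (run_1.14G_dp8_tp1_pp1_...,  #SBATCH --nodes=1)
nodes_for 8 32 1   # -> 32  (run_1.14G_dp8_tp32_pp1_..., #SBATCH --nodes=32)
nodes_for 2 32 8   # -> 64  (run_1.34G_dp2_tp32_pp8_..., #SBATCH --nodes=64)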
scripts/run_1.14G_dp8_tp32_pp1_acc64_mbs1_seq8192_zero1_tpmodeALL_vocab32k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.14G_dp8_tp32_pp1_acc64_mbs1_seq8192_zero1_tpmodeALL_vocab32k  # Job name
+#SBATCH --time=00:02:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=32  # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1  # Number of tasks per node
+#SBATCH --cpus-per-task=60  # CPU cores per task
+#SBATCH --gres=gpu:8  # Number of GPUs per node
+#SBATCH --exclusive  # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.14G_dp8_tp32_pp1_acc64_mbs1_seq8192_zero1_tpmodeALL_vocab32k.yaml
scripts/run_1.34G_dp16_tp16_pp1_acc8_mbs1_seq32768_zero1_tpmodeALL_vocab131k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.34G_dp16_tp16_pp1_acc8_mbs1_seq32768_zero1_tpmodeALL_vocab131k  # Job name
+#SBATCH --time=00:02:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=32  # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1  # Number of tasks per node
+#SBATCH --cpus-per-task=60  # CPU cores per task
+#SBATCH --gres=gpu:8  # Number of GPUs per node
+#SBATCH --exclusive  # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.34G_dp16_tp16_pp1_acc8_mbs1_seq32768_zero1_tpmodeALL_vocab131k.yaml
scripts/run_1.34G_dp16_tp4_pp1_acc2_mbs1_seq32768_zero1_tpmodeALL_vocab131k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.34G_dp16_tp4_pp1_acc2_mbs1_seq32768_zero1_tpmodeALL_vocab131k  # Job name
+#SBATCH --time=00:02:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=8  # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1  # Number of tasks per node
+#SBATCH --cpus-per-task=60  # CPU cores per task
+#SBATCH --gres=gpu:8  # Number of GPUs per node
+#SBATCH --exclusive  # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.34G_dp16_tp4_pp1_acc2_mbs1_seq32768_zero1_tpmodeALL_vocab131k.yaml
scripts/run_1.34G_dp16_tp8_pp1_acc32_mbs4_seq2048_zero1_tpmodeRED_vocab131k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.34G_dp16_tp8_pp1_acc32_mbs4_seq2048_zero1_tpmodeRED_vocab131k  # Job name
+#SBATCH --time=00:02:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=16  # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1  # Number of tasks per node
+#SBATCH --cpus-per-task=60  # CPU cores per task
+#SBATCH --gres=gpu:8  # Number of GPUs per node
+#SBATCH --exclusive  # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.34G_dp16_tp8_pp1_acc32_mbs4_seq2048_zero1_tpmodeRED_vocab131k.yaml
scripts/run_1.34G_dp16_tp8_pp1_acc4_mbs8_seq2048_zero1_tpmodeALL_vocab131k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.34G_dp16_tp8_pp1_acc4_mbs8_seq2048_zero1_tpmodeALL_vocab131k  # Job name
+#SBATCH --time=00:02:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=16  # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1  # Number of tasks per node
+#SBATCH --cpus-per-task=60  # CPU cores per task
+#SBATCH --gres=gpu:8  # Number of GPUs per node
+#SBATCH --exclusive  # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.34G_dp16_tp8_pp1_acc4_mbs8_seq2048_zero1_tpmodeALL_vocab131k.yaml
scripts/run_1.34G_dp1_tp4_pp2_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k.sh
ADDED
@@ -0,0 +1,73 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.34G_dp1_tp4_pp2_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k  # Job name
+#SBATCH --time=00:10:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+#SBATCH --exclude=ip-26-0-160-192,ip-26-0-171-102
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=1  # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1  # Number of tasks per node
+#SBATCH --cpus-per-task=60  # CPU cores per task
+#SBATCH --gres=gpu:8  # Number of GPUs per node
+#SBATCH --exclusive  # Exclusive use of nodes
+#SBATCH --wait-all-nodes=1  # fail if any node is not ready
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+export NCCL_DEBUG=WARN # INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+# Disable wandb
+export WANDB_MODE=disabled
+
+# Trying to avoid hangs
+export TORCH_NCCL_ASYNC_ERROR_HANDLING=1
+
+
+# Print GPU topology information
+echo "=== GPU Topology ==="
+nvidia-smi topo -m
+echo "=================="
+
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun --wait=0 --kill-on-bad-exit=1 torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    --max_restarts 0 \
+    --rdzv_conf timeout=60 \
+    /fsx/nouamane/projects/nanotron/run_train.py \
+    --config-file benchmark/configs/config_1.34G_dp1_tp4_pp2_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k.yaml
scripts/run_1.34G_dp256_tp1_pp2_acc8_mbs1_seq2048_zero1_tpmodeRED_vocab131k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.34G_dp256_tp1_pp2_acc8_mbs1_seq2048_zero1_tpmodeRED_vocab131k  # Job name
+#SBATCH --time=00:15:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=64  # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1  # Number of tasks per node
+#SBATCH --cpus-per-task=60  # CPU cores per task
+#SBATCH --gres=gpu:8  # Number of GPUs per node
+#SBATCH --exclusive  # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.34G_dp256_tp1_pp2_acc8_mbs1_seq2048_zero1_tpmodeRED_vocab131k.yaml
scripts/run_1.34G_dp256_tp2_pp1_acc1_mbs2_seq8192_zero1_tpmodeRED_vocab131k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.34G_dp256_tp2_pp1_acc1_mbs2_seq8192_zero1_tpmodeRED_vocab131k  # Job name
+#SBATCH --time=00:02:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=64  # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1  # Number of tasks per node
+#SBATCH --cpus-per-task=60  # CPU cores per task
+#SBATCH --gres=gpu:8  # Number of GPUs per node
+#SBATCH --exclusive  # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.34G_dp256_tp2_pp1_acc1_mbs2_seq8192_zero1_tpmodeRED_vocab131k.yaml
scripts/run_1.34G_dp2_tp128_pp1_acc256_mbs1_seq8192_zero1_tpmodeALL_vocab131k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.34G_dp2_tp128_pp1_acc256_mbs1_seq8192_zero1_tpmodeALL_vocab131k  # Job name
+#SBATCH --time=00:02:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=32  # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1  # Number of tasks per node
+#SBATCH --cpus-per-task=60  # CPU cores per task
+#SBATCH --gres=gpu:8  # Number of GPUs per node
+#SBATCH --exclusive  # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.34G_dp2_tp128_pp1_acc256_mbs1_seq8192_zero1_tpmodeALL_vocab131k.yaml
scripts/run_1.34G_dp2_tp256_pp1_acc64_mbs1_seq8192_zero1_tpmodeALL_vocab131k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.34G_dp2_tp256_pp1_acc64_mbs1_seq8192_zero1_tpmodeALL_vocab131k  # Job name
+#SBATCH --time=00:02:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=64  # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1  # Number of tasks per node
+#SBATCH --cpus-per-task=60  # CPU cores per task
+#SBATCH --gres=gpu:8  # Number of GPUs per node
+#SBATCH --exclusive  # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.34G_dp2_tp256_pp1_acc64_mbs1_seq8192_zero1_tpmodeALL_vocab131k.yaml
scripts/run_1.34G_dp2_tp32_pp1_acc1_mbs64_seq8192_zero1_tpmodeALL_vocab131k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.34G_dp2_tp32_pp1_acc1_mbs64_seq8192_zero1_tpmodeALL_vocab131k  # Job name
+#SBATCH --time=00:02:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=8  # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1  # Number of tasks per node
+#SBATCH --cpus-per-task=60  # CPU cores per task
+#SBATCH --gres=gpu:8  # Number of GPUs per node
+#SBATCH --exclusive  # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.34G_dp2_tp32_pp1_acc1_mbs64_seq8192_zero1_tpmodeALL_vocab131k.yaml
scripts/run_1.34G_dp2_tp32_pp1_acc2_mbs8_seq32768_zero1_tpmodeRED_vocab131k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.34G_dp2_tp32_pp1_acc2_mbs8_seq32768_zero1_tpmodeRED_vocab131k  # Job name
+#SBATCH --time=00:02:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=8  # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1  # Number of tasks per node
+#SBATCH --cpus-per-task=60  # CPU cores per task
+#SBATCH --gres=gpu:8  # Number of GPUs per node
+#SBATCH --exclusive  # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.34G_dp2_tp32_pp1_acc2_mbs8_seq32768_zero1_tpmodeRED_vocab131k.yaml
scripts/run_1.34G_dp2_tp32_pp8_acc8_mbs16_seq4096_zero1_tpmodeRED_vocab131k.sh
ADDED
@@ -0,0 +1,159 @@
+#!/bin/bash
+#SBATCH --job-name=bench_1.34G_dp2_tp32_pp8_acc8_mbs16_seq4096_zero1_tpmodeRED_vocab131k  # Job name
+#SBATCH --time=00:40:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=64  # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1  # Number of tasks per node
+#SBATCH --cpus-per-task=60  # CPU cores per task
+#SBATCH --gres=gpu:8  # Number of GPUs per node
+#SBATCH --exclusive  # Exclusive use of nodes
+#SBATCH --wait-all-nodes=1  # fail if any node is not ready
+
+# run using
+# sbatch --nodes=1 run_multinode.sh
+# or
+# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh
+
+set -x -e
+
+# If not running under SLURM, set default SLURM environment variables
+if [ -z "${SLURM_JOB_ID}" ]; then
+    if [ -z "${SALLOC_JOBID}" ]; then
+        echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session."
+        exit 1
+    fi
+    if [ -z "${NNODES}" ]; then
+        echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session."
+        exit 1
+    fi
+    export SALLOC_MODE=1
+    export SLURM_JOB_ID=$SALLOC_JOBID
+    export SLURM_NNODES=$NNODES
+    export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N")
+fi
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+# Unset FI_PROVIDER to avoid potential libfabric provider issues
+# unset FI_PROVIDER
+
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+if [ -z "${SALLOC_MODE}" ]; then # sbatch mode
+    export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+
+else # srun mode
+    export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES`
+fi
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+export NCCL_DEBUG=WARN # INFO, WARN
+# export NCCL_DEBUG_SUBSYS=ALL
+# export CUDA_LAUNCH_BLOCKING=1
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+export WANDB_MODE=disabled
+
+# export TORCH_NCCL_USE_COMM_NONBLOCKING=1
+
+# Trying to avoid hangs
+export TORCH_NCCL_ASYNC_ERROR_HANDLING=1
+
+# debug
+export TORCH_DISTRIBUTED_DEBUG=DETAIL
+
+# export NCCL_P2P_LEVEL=NVL
+# export CUDA_LAUNCH_BLOCKING=1
+# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA
+# export NCCL_NET_GDR_LEVEL=LOC
+# Test Script - save as test_comm.sh
+
+# Test 1 - Force TCP
+# echo "Running with TCP only..."
+# export NCCL_P2P_LEVEL=LOC
+
+# # Match bandwidth patterns
+# export NCCL_MAX_NCHANNELS=2
+# export NCCL_MIN_NCHANNELS=2
+
+
+# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA
+# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport
+# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport
+# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds
+# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well
+
+# Force SHM
+# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode
+# export NCCL_SOCKET_NTHREADS=1
+# export FI_PROVIDER="tcp"
+
+# Print GPU topology information
+if [ -z "${SALLOC_MODE}" ]; then
+    echo "=== GPU Topology ==="
+    nvidia-smi topo -m
+    echo "=================="
+    export SRUN_ALLOC_ARGS=""
+else
+    export JOBNAME="bench_1.34G_dp2_tp32_pp8_acc8_mbs16_seq4096_zero1_tpmodeRED_vocab131k"
+    export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out"
+    export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME"
+fi
+
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun in background
+if [ -n "${SALLOC_MODE}" ]; then # srun mode
+    srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    --max_restarts 0 \
+    --rdzv_conf timeout=60 \
+    /fsx/nouamane/projects/nanotron/run_train.py \
+    --config-file benchmark/configs/config_1.34G_dp2_tp32_pp8_acc8_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 &
+    # Store the process ID
+    SRUN_PID=$!
+    echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE
+
+    # Optionally, you can add:
+    echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE
+    echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE
+
+else # sbatch mode
+    srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    --max_restarts 0 \
+    --rdzv_conf timeout=60 \
+    /fsx/nouamane/projects/nanotron/run_train.py \
+    --config-file benchmark/configs/config_1.34G_dp2_tp32_pp8_acc8_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml
+fi
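Unlike the fixed-allocation launchers, the script above can also be driven from an existing salloc allocation. A usage sketch, following its own header comments (the job id below is just the placeholder value that appears in those comments, and NNODES is set to the 64 nodes this config requests):

# Submit as a batch job:
sbatch scripts/run_1.34G_dp2_tp32_pp8_acc8_mbs16_seq4096_zero1_tpmodeRED_vocab131k.sh
# Or reuse an existing salloc allocation:
SALLOC_JOBID=13482276 NNODES=64 bash scripts/run_1.34G_dp2_tp32_pp8_acc8_mbs16_seq4096_zero1_tpmodeRED_vocab131k.sh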
scripts/run_1.34G_dp2_tp4_pp1_acc32_mbs32_seq2048_zero1_tpmodeRED_vocab131k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.34G_dp2_tp4_pp1_acc32_mbs32_seq2048_zero1_tpmodeRED_vocab131k  # Job name
+#SBATCH --time=00:02:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=1  # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1  # Number of tasks per node
+#SBATCH --cpus-per-task=60  # CPU cores per task
+#SBATCH --gres=gpu:8  # Number of GPUs per node
+#SBATCH --exclusive  # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.34G_dp2_tp4_pp1_acc32_mbs32_seq2048_zero1_tpmodeRED_vocab131k.yaml
scripts/run_1.34G_dp2_tp64_pp1_acc2_mbs32_seq32768_zero1_tpmodeRED_vocab131k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.34G_dp2_tp64_pp1_acc2_mbs32_seq32768_zero1_tpmodeRED_vocab131k  # Job name
+#SBATCH --time=00:02:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=16  # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1  # Number of tasks per node
+#SBATCH --cpus-per-task=60  # CPU cores per task
+#SBATCH --gres=gpu:8  # Number of GPUs per node
+#SBATCH --exclusive  # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.34G_dp2_tp64_pp1_acc2_mbs32_seq32768_zero1_tpmodeRED_vocab131k.yaml
scripts/run_1.34G_dp2_tp8_pp1_acc256_mbs1_seq2048_zero1_tpmodeRED_vocab131k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.34G_dp2_tp8_pp1_acc256_mbs1_seq2048_zero1_tpmodeRED_vocab131k  # Job name
+#SBATCH --time=00:02:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=2  # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1  # Number of tasks per node
+#SBATCH --cpus-per-task=60  # CPU cores per task
+#SBATCH --gres=gpu:8  # Number of GPUs per node
+#SBATCH --exclusive  # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.34G_dp2_tp8_pp1_acc256_mbs1_seq2048_zero1_tpmodeRED_vocab131k.yaml
scripts/run_1.34G_dp32_tp2_pp1_acc1_mbs4_seq32768_zero1_tpmodeALL_vocab131k.sh
ADDED
@@ -0,0 +1,68 @@
#!/bin/bash

#SBATCH --job-name=bench_1.34G_dp32_tp2_pp1_acc1_mbs4_seq32768_zero1_tpmodeALL_vocab131k # Job name
#SBATCH --time=00:02:00
#SBATCH --partition=hopper-prod
#SBATCH --qos=high

#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out

#SBATCH --nodes=8 # Number of nodes (modify as needed)
#SBATCH --ntasks-per-node=1 # Number of tasks per node
#SBATCH --cpus-per-task=60 # CPU cores per task
#SBATCH --gres=gpu:8 # Number of GPUs per node
#SBATCH --exclusive # Exclusive use of nodes

set -x -e

# Load any necessary modules for your system
source /etc/profile.d/modules.sh # for some reason module isn't loaded
module load cuda/12.1

# Activate your conda environment if needed
source /fsx/nouamane/miniconda/bin/activate
conda activate 2-1-cu121
export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH

# Get the node names from SLURM
export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
export MASTER_PORT=12356

# Calculate total number of processes
export NNODES=$SLURM_NNODES
export GPUS_PER_NODE=8
export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))

# Set some environment variables for better distributed training
export CUDA_DEVICE_MAX_CONNECTIONS=1
# export NCCL_DEBUG=INFO

# Nanotron specific
export NANOTRON_BENCHMARK=1

# # Disable EFA by changing the provider to tcp
# export FI_PROVIDER=tcp

# # Optionally, you can also unset these EFA-related variables
# unset FI_EFA_FORK_SAFE
# unset FI_EFA_ENABLE_SHM_TRANSFER

# # If you want to ensure NCCL uses TCP
# export NCCL_IB_DISABLE=1
# export NCCL_SOCKET_IFNAME=eth0

# Print some debugging information
echo "Master node: $MASTER_NODE"
echo "All nodes: $NODELIST"
echo "World size: $WORLD_SIZE"

# Launch the training script using srun
srun torchrun \
    --nnodes=$NNODES \
    --nproc_per_node=$GPUS_PER_NODE \
    --rdzv_id=$SLURM_JOB_ID \
    --rdzv_backend=c10d \
    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
    run_train.py \
    --config-file benchmark/configs/config_1.34G_dp32_tp2_pp1_acc1_mbs4_seq32768_zero1_tpmodeALL_vocab131k.yaml
scripts/run_1.34G_dp32_tp2_pp1_acc2_mbs32_seq2048_zero1_tpmodeRED_vocab131k.sh
ADDED
@@ -0,0 +1,68 @@
#!/bin/bash

#SBATCH --job-name=bench_1.34G_dp32_tp2_pp1_acc2_mbs32_seq2048_zero1_tpmodeRED_vocab131k # Job name
#SBATCH --time=00:02:00
#SBATCH --partition=hopper-prod
#SBATCH --qos=high

#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out

#SBATCH --nodes=8 # Number of nodes (modify as needed)
#SBATCH --ntasks-per-node=1 # Number of tasks per node
#SBATCH --cpus-per-task=60 # CPU cores per task
#SBATCH --gres=gpu:8 # Number of GPUs per node
#SBATCH --exclusive # Exclusive use of nodes

set -x -e

# Load any necessary modules for your system
source /etc/profile.d/modules.sh # for some reason module isn't loaded
module load cuda/12.1

# Activate your conda environment if needed
source /fsx/nouamane/miniconda/bin/activate
conda activate 2-1-cu121
export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH

# Get the node names from SLURM
export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
export MASTER_PORT=12356

# Calculate total number of processes
export NNODES=$SLURM_NNODES
export GPUS_PER_NODE=8
export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))

# Set some environment variables for better distributed training
export CUDA_DEVICE_MAX_CONNECTIONS=1
# export NCCL_DEBUG=INFO

# Nanotron specific
export NANOTRON_BENCHMARK=1

# # Disable EFA by changing the provider to tcp
# export FI_PROVIDER=tcp

# # Optionally, you can also unset these EFA-related variables
# unset FI_EFA_FORK_SAFE
# unset FI_EFA_ENABLE_SHM_TRANSFER

# # If you want to ensure NCCL uses TCP
# export NCCL_IB_DISABLE=1
# export NCCL_SOCKET_IFNAME=eth0

# Print some debugging information
echo "Master node: $MASTER_NODE"
echo "All nodes: $NODELIST"
echo "World size: $WORLD_SIZE"

# Launch the training script using srun
srun torchrun \
    --nnodes=$NNODES \
    --nproc_per_node=$GPUS_PER_NODE \
    --rdzv_id=$SLURM_JOB_ID \
    --rdzv_backend=c10d \
    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
    run_train.py \
    --config-file benchmark/configs/config_1.34G_dp32_tp2_pp1_acc2_mbs32_seq2048_zero1_tpmodeRED_vocab131k.yaml
scripts/run_1.34G_dp32_tp2_pp1_acc4_mbs1_seq32768_zero1_tpmodeALL_vocab131k.sh
ADDED
@@ -0,0 +1,68 @@
#!/bin/bash

#SBATCH --job-name=bench_1.34G_dp32_tp2_pp1_acc4_mbs1_seq32768_zero1_tpmodeALL_vocab131k # Job name
#SBATCH --time=00:02:00
#SBATCH --partition=hopper-prod
#SBATCH --qos=high

#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out

#SBATCH --nodes=8 # Number of nodes (modify as needed)
#SBATCH --ntasks-per-node=1 # Number of tasks per node
#SBATCH --cpus-per-task=60 # CPU cores per task
#SBATCH --gres=gpu:8 # Number of GPUs per node
#SBATCH --exclusive # Exclusive use of nodes

set -x -e

# Load any necessary modules for your system
source /etc/profile.d/modules.sh # for some reason module isn't loaded
module load cuda/12.1

# Activate your conda environment if needed
source /fsx/nouamane/miniconda/bin/activate
conda activate 2-1-cu121
export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH

# Get the node names from SLURM
export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
export MASTER_PORT=12356

# Calculate total number of processes
export NNODES=$SLURM_NNODES
export GPUS_PER_NODE=8
export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))

# Set some environment variables for better distributed training
export CUDA_DEVICE_MAX_CONNECTIONS=1
# export NCCL_DEBUG=INFO

# Nanotron specific
export NANOTRON_BENCHMARK=1

# # Disable EFA by changing the provider to tcp
# export FI_PROVIDER=tcp

# # Optionally, you can also unset these EFA-related variables
# unset FI_EFA_FORK_SAFE
# unset FI_EFA_ENABLE_SHM_TRANSFER

# # If you want to ensure NCCL uses TCP
# export NCCL_IB_DISABLE=1
# export NCCL_SOCKET_IFNAME=eth0

# Print some debugging information
echo "Master node: $MASTER_NODE"
echo "All nodes: $NODELIST"
echo "World size: $WORLD_SIZE"

# Launch the training script using srun
srun torchrun \
    --nnodes=$NNODES \
    --nproc_per_node=$GPUS_PER_NODE \
    --rdzv_id=$SLURM_JOB_ID \
    --rdzv_backend=c10d \
    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
    run_train.py \
    --config-file benchmark/configs/config_1.34G_dp32_tp2_pp1_acc4_mbs1_seq32768_zero1_tpmodeALL_vocab131k.yaml
scripts/run_1.34G_dp32_tp4_pp1_acc2_mbs2_seq8192_zero1_tpmodeALL_vocab131k.sh
ADDED
@@ -0,0 +1,68 @@
#!/bin/bash

#SBATCH --job-name=bench_1.34G_dp32_tp4_pp1_acc2_mbs2_seq8192_zero1_tpmodeALL_vocab131k # Job name
#SBATCH --time=00:02:00
#SBATCH --partition=hopper-prod
#SBATCH --qos=high

#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out

#SBATCH --nodes=16 # Number of nodes (modify as needed)
#SBATCH --ntasks-per-node=1 # Number of tasks per node
#SBATCH --cpus-per-task=60 # CPU cores per task
#SBATCH --gres=gpu:8 # Number of GPUs per node
#SBATCH --exclusive # Exclusive use of nodes

set -x -e

# Load any necessary modules for your system
source /etc/profile.d/modules.sh # for some reason module isn't loaded
module load cuda/12.1

# Activate your conda environment if needed
source /fsx/nouamane/miniconda/bin/activate
conda activate 2-1-cu121
export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH

# Get the node names from SLURM
export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
export MASTER_PORT=12356

# Calculate total number of processes
export NNODES=$SLURM_NNODES
export GPUS_PER_NODE=8
export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))

# Set some environment variables for better distributed training
export CUDA_DEVICE_MAX_CONNECTIONS=1
# export NCCL_DEBUG=INFO

# Nanotron specific
export NANOTRON_BENCHMARK=1

# # Disable EFA by changing the provider to tcp
# export FI_PROVIDER=tcp

# # Optionally, you can also unset these EFA-related variables
# unset FI_EFA_FORK_SAFE
# unset FI_EFA_ENABLE_SHM_TRANSFER

# # If you want to ensure NCCL uses TCP
# export NCCL_IB_DISABLE=1
# export NCCL_SOCKET_IFNAME=eth0

# Print some debugging information
echo "Master node: $MASTER_NODE"
echo "All nodes: $NODELIST"
echo "World size: $WORLD_SIZE"

# Launch the training script using srun
srun torchrun \
    --nnodes=$NNODES \
    --nproc_per_node=$GPUS_PER_NODE \
    --rdzv_id=$SLURM_JOB_ID \
    --rdzv_backend=c10d \
    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
    run_train.py \
    --config-file benchmark/configs/config_1.34G_dp32_tp4_pp1_acc2_mbs2_seq8192_zero1_tpmodeALL_vocab131k.yaml
scripts/run_1.34G_dp4_tp128_pp1_acc256_mbs2_seq2048_zero1_tpmodeRED_vocab131k.sh
ADDED
@@ -0,0 +1,68 @@
#!/bin/bash

#SBATCH --job-name=bench_1.34G_dp4_tp128_pp1_acc256_mbs2_seq2048_zero1_tpmodeRED_vocab131k # Job name
#SBATCH --time=00:02:00
#SBATCH --partition=hopper-prod
#SBATCH --qos=high

#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out

#SBATCH --nodes=64 # Number of nodes (modify as needed)
#SBATCH --ntasks-per-node=1 # Number of tasks per node
#SBATCH --cpus-per-task=60 # CPU cores per task
#SBATCH --gres=gpu:8 # Number of GPUs per node
#SBATCH --exclusive # Exclusive use of nodes

set -x -e

# Load any necessary modules for your system
source /etc/profile.d/modules.sh # for some reason module isn't loaded
module load cuda/12.1

# Activate your conda environment if needed
source /fsx/nouamane/miniconda/bin/activate
conda activate 2-1-cu121
export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH

# Get the node names from SLURM
export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
export MASTER_PORT=12356

# Calculate total number of processes
export NNODES=$SLURM_NNODES
export GPUS_PER_NODE=8
export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))

# Set some environment variables for better distributed training
export CUDA_DEVICE_MAX_CONNECTIONS=1
# export NCCL_DEBUG=INFO

# Nanotron specific
export NANOTRON_BENCHMARK=1

# # Disable EFA by changing the provider to tcp
# export FI_PROVIDER=tcp

# # Optionally, you can also unset these EFA-related variables
# unset FI_EFA_FORK_SAFE
# unset FI_EFA_ENABLE_SHM_TRANSFER

# # If you want to ensure NCCL uses TCP
# export NCCL_IB_DISABLE=1
# export NCCL_SOCKET_IFNAME=eth0

# Print some debugging information
echo "Master node: $MASTER_NODE"
echo "All nodes: $NODELIST"
echo "World size: $WORLD_SIZE"

# Launch the training script using srun
srun torchrun \
    --nnodes=$NNODES \
    --nproc_per_node=$GPUS_PER_NODE \
    --rdzv_id=$SLURM_JOB_ID \
    --rdzv_backend=c10d \
    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
    run_train.py \
    --config-file benchmark/configs/config_1.34G_dp4_tp128_pp1_acc256_mbs2_seq2048_zero1_tpmodeRED_vocab131k.yaml
scripts/run_1.34G_dp4_tp128_pp1_acc4_mbs8_seq8192_zero1_tpmodeRED_vocab131k.sh
ADDED
@@ -0,0 +1,68 @@
#!/bin/bash

#SBATCH --job-name=bench_1.34G_dp4_tp128_pp1_acc4_mbs8_seq8192_zero1_tpmodeRED_vocab131k # Job name
#SBATCH --time=00:02:00
#SBATCH --partition=hopper-prod
#SBATCH --qos=high

#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out

#SBATCH --nodes=64 # Number of nodes (modify as needed)
#SBATCH --ntasks-per-node=1 # Number of tasks per node
#SBATCH --cpus-per-task=60 # CPU cores per task
#SBATCH --gres=gpu:8 # Number of GPUs per node
#SBATCH --exclusive # Exclusive use of nodes

set -x -e

# Load any necessary modules for your system
source /etc/profile.d/modules.sh # for some reason module isn't loaded
module load cuda/12.1

# Activate your conda environment if needed
source /fsx/nouamane/miniconda/bin/activate
conda activate 2-1-cu121
export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH

# Get the node names from SLURM
export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
export MASTER_PORT=12356

# Calculate total number of processes
export NNODES=$SLURM_NNODES
export GPUS_PER_NODE=8
export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))

# Set some environment variables for better distributed training
export CUDA_DEVICE_MAX_CONNECTIONS=1
# export NCCL_DEBUG=INFO

# Nanotron specific
export NANOTRON_BENCHMARK=1

# # Disable EFA by changing the provider to tcp
# export FI_PROVIDER=tcp

# # Optionally, you can also unset these EFA-related variables
# unset FI_EFA_FORK_SAFE
# unset FI_EFA_ENABLE_SHM_TRANSFER

# # If you want to ensure NCCL uses TCP
# export NCCL_IB_DISABLE=1
# export NCCL_SOCKET_IFNAME=eth0

# Print some debugging information
echo "Master node: $MASTER_NODE"
echo "All nodes: $NODELIST"
echo "World size: $WORLD_SIZE"

# Launch the training script using srun
srun torchrun \
    --nnodes=$NNODES \
    --nproc_per_node=$GPUS_PER_NODE \
    --rdzv_id=$SLURM_JOB_ID \
    --rdzv_backend=c10d \
    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
    run_train.py \
    --config-file benchmark/configs/config_1.34G_dp4_tp128_pp1_acc4_mbs8_seq8192_zero1_tpmodeRED_vocab131k.yaml
scripts/run_1.34G_dp4_tp128_pp1_acc8_mbs1_seq32768_zero1_tpmodeALL_vocab131k.sh
ADDED
@@ -0,0 +1,68 @@
#!/bin/bash

#SBATCH --job-name=bench_1.34G_dp4_tp128_pp1_acc8_mbs1_seq32768_zero1_tpmodeALL_vocab131k # Job name
#SBATCH --time=00:02:00
#SBATCH --partition=hopper-prod
#SBATCH --qos=high

#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out

#SBATCH --nodes=64 # Number of nodes (modify as needed)
#SBATCH --ntasks-per-node=1 # Number of tasks per node
#SBATCH --cpus-per-task=60 # CPU cores per task
#SBATCH --gres=gpu:8 # Number of GPUs per node
#SBATCH --exclusive # Exclusive use of nodes

set -x -e

# Load any necessary modules for your system
source /etc/profile.d/modules.sh # for some reason module isn't loaded
module load cuda/12.1

# Activate your conda environment if needed
source /fsx/nouamane/miniconda/bin/activate
conda activate 2-1-cu121
export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH

# Get the node names from SLURM
export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
export MASTER_PORT=12356

# Calculate total number of processes
export NNODES=$SLURM_NNODES
export GPUS_PER_NODE=8
export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))

# Set some environment variables for better distributed training
export CUDA_DEVICE_MAX_CONNECTIONS=1
# export NCCL_DEBUG=INFO

# Nanotron specific
export NANOTRON_BENCHMARK=1

# # Disable EFA by changing the provider to tcp
# export FI_PROVIDER=tcp

# # Optionally, you can also unset these EFA-related variables
# unset FI_EFA_FORK_SAFE
# unset FI_EFA_ENABLE_SHM_TRANSFER

# # If you want to ensure NCCL uses TCP
# export NCCL_IB_DISABLE=1
# export NCCL_SOCKET_IFNAME=eth0

# Print some debugging information
echo "Master node: $MASTER_NODE"
echo "All nodes: $NODELIST"
echo "World size: $WORLD_SIZE"

# Launch the training script using srun
srun torchrun \
    --nnodes=$NNODES \
    --nproc_per_node=$GPUS_PER_NODE \
    --rdzv_id=$SLURM_JOB_ID \
    --rdzv_backend=c10d \
    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
    run_train.py \
    --config-file benchmark/configs/config_1.34G_dp4_tp128_pp1_acc8_mbs1_seq32768_zero1_tpmodeALL_vocab131k.yaml
scripts/run_1.34G_dp4_tp16_pp1_acc2_mbs256_seq2048_zero1_tpmodeALL_vocab131k.sh
ADDED
@@ -0,0 +1,68 @@
#!/bin/bash

#SBATCH --job-name=bench_1.34G_dp4_tp16_pp1_acc2_mbs256_seq2048_zero1_tpmodeALL_vocab131k # Job name
#SBATCH --time=00:02:00
#SBATCH --partition=hopper-prod
#SBATCH --qos=high

#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out

#SBATCH --nodes=8 # Number of nodes (modify as needed)
#SBATCH --ntasks-per-node=1 # Number of tasks per node
#SBATCH --cpus-per-task=60 # CPU cores per task
#SBATCH --gres=gpu:8 # Number of GPUs per node
#SBATCH --exclusive # Exclusive use of nodes

set -x -e

# Load any necessary modules for your system
source /etc/profile.d/modules.sh # for some reason module isn't loaded
module load cuda/12.1

# Activate your conda environment if needed
source /fsx/nouamane/miniconda/bin/activate
conda activate 2-1-cu121
export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH

# Get the node names from SLURM
export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
export MASTER_PORT=12356

# Calculate total number of processes
export NNODES=$SLURM_NNODES
export GPUS_PER_NODE=8
export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))

# Set some environment variables for better distributed training
export CUDA_DEVICE_MAX_CONNECTIONS=1
# export NCCL_DEBUG=INFO

# Nanotron specific
export NANOTRON_BENCHMARK=1

# # Disable EFA by changing the provider to tcp
# export FI_PROVIDER=tcp

# # Optionally, you can also unset these EFA-related variables
# unset FI_EFA_FORK_SAFE
# unset FI_EFA_ENABLE_SHM_TRANSFER

# # If you want to ensure NCCL uses TCP
# export NCCL_IB_DISABLE=1
# export NCCL_SOCKET_IFNAME=eth0

# Print some debugging information
echo "Master node: $MASTER_NODE"
echo "All nodes: $NODELIST"
echo "World size: $WORLD_SIZE"

# Launch the training script using srun
srun torchrun \
    --nnodes=$NNODES \
    --nproc_per_node=$GPUS_PER_NODE \
    --rdzv_id=$SLURM_JOB_ID \
    --rdzv_backend=c10d \
    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
    run_train.py \
    --config-file benchmark/configs/config_1.34G_dp4_tp16_pp1_acc2_mbs256_seq2048_zero1_tpmodeALL_vocab131k.yaml
scripts/run_1.34G_dp4_tp16_pp1_acc32_mbs1_seq32768_zero1_tpmodeALL_vocab131k.sh
ADDED
@@ -0,0 +1,68 @@
#!/bin/bash

#SBATCH --job-name=bench_1.34G_dp4_tp16_pp1_acc32_mbs1_seq32768_zero1_tpmodeALL_vocab131k # Job name
#SBATCH --time=00:02:00
#SBATCH --partition=hopper-prod
#SBATCH --qos=high

#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out

#SBATCH --nodes=8 # Number of nodes (modify as needed)
#SBATCH --ntasks-per-node=1 # Number of tasks per node
#SBATCH --cpus-per-task=60 # CPU cores per task
#SBATCH --gres=gpu:8 # Number of GPUs per node
#SBATCH --exclusive # Exclusive use of nodes

set -x -e

# Load any necessary modules for your system
source /etc/profile.d/modules.sh # for some reason module isn't loaded
module load cuda/12.1

# Activate your conda environment if needed
source /fsx/nouamane/miniconda/bin/activate
conda activate 2-1-cu121
export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH

# Get the node names from SLURM
export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
export MASTER_PORT=12356

# Calculate total number of processes
export NNODES=$SLURM_NNODES
export GPUS_PER_NODE=8
export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))

# Set some environment variables for better distributed training
export CUDA_DEVICE_MAX_CONNECTIONS=1
# export NCCL_DEBUG=INFO

# Nanotron specific
export NANOTRON_BENCHMARK=1

# # Disable EFA by changing the provider to tcp
# export FI_PROVIDER=tcp

# # Optionally, you can also unset these EFA-related variables
# unset FI_EFA_FORK_SAFE
# unset FI_EFA_ENABLE_SHM_TRANSFER

# # If you want to ensure NCCL uses TCP
# export NCCL_IB_DISABLE=1
# export NCCL_SOCKET_IFNAME=eth0

# Print some debugging information
echo "Master node: $MASTER_NODE"
echo "All nodes: $NODELIST"
echo "World size: $WORLD_SIZE"

# Launch the training script using srun
srun torchrun \
    --nnodes=$NNODES \
    --nproc_per_node=$GPUS_PER_NODE \
    --rdzv_id=$SLURM_JOB_ID \
    --rdzv_backend=c10d \
    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
    run_train.py \
    --config-file benchmark/configs/config_1.34G_dp4_tp16_pp1_acc32_mbs1_seq32768_zero1_tpmodeALL_vocab131k.yaml
scripts/run_1.34G_dp4_tp1_pp2_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh
ADDED
@@ -0,0 +1,159 @@
#!/bin/bash
#SBATCH --job-name=bench_1.34G_dp4_tp1_pp2_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k # Job name
#SBATCH --time=01:10:00
#SBATCH --partition=hopper-prod
#SBATCH --qos=high

#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out

#SBATCH --nodes=1 # Number of nodes (modify as needed)
#SBATCH --ntasks-per-node=1 # Number of tasks per node
#SBATCH --cpus-per-task=60 # CPU cores per task
#SBATCH --gres=gpu:8 # Number of GPUs per node
#SBATCH --exclusive # Exclusive use of nodes
#SBATCH --wait-all-nodes=1 # fail if any node is not ready

# run using
# sbatch --nodes=1 run_multinode.sh
# or
# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh

set -x -e

# If not running under SLURM, set default SLURM environment variables
if [ -z "${SLURM_JOB_ID}" ]; then
    if [ -z "${SALLOC_JOBID}" ]; then
        echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session."
        exit 1
    fi
    if [ -z "${NNODES}" ]; then
        echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session."
        exit 1
    fi
    export SALLOC_MODE=1
    export SLURM_JOB_ID=$SALLOC_JOBID
    export SLURM_NNODES=$NNODES
    export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N")
fi

# Load any necessary modules for your system
source /etc/profile.d/modules.sh # for some reason module isn't loaded
module load cuda/12.1
# Unset FI_PROVIDER to avoid potential libfabric provider issues
# unset FI_PROVIDER


# Activate your conda environment if needed
source /fsx/nouamane/miniconda/bin/activate
conda activate 2-1-cu121
export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH

# Get the node names from SLURM
if [ -z "${SALLOC_MODE}" ]; then # sbatch mode
    export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`

else # srun mode
    export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES`
fi
export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
export MASTER_PORT=12356

# Calculate total number of processes
export NNODES=$SLURM_NNODES
export GPUS_PER_NODE=8
export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))

# Set some environment variables for better distributed training
export CUDA_DEVICE_MAX_CONNECTIONS=1
export NCCL_DEBUG=INFO # INFO, WARN
# export NCCL_DEBUG_SUBSYS=ALL
# export CUDA_LAUNCH_BLOCKING=1

# Nanotron specific
export NANOTRON_BENCHMARK=1
export WANDB_MODE=disabled

# export TORCH_NCCL_USE_COMM_NONBLOCKING=1

# Trying to avoid hangs
export TORCH_NCCL_ASYNC_ERROR_HANDLING=1

# debug
export TORCH_DISTRIBUTED_DEBUG=DETAIL

# export NCCL_P2P_LEVEL=NVL
# export CUDA_LAUNCH_BLOCKING=1
# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA
# export NCCL_NET_GDR_LEVEL=LOC
# Test Script - save as test_comm.sh

# Test 1 - Force TCP
# echo "Running with TCP only..."
# export NCCL_P2P_LEVEL=LOC

# # Match bandwidth patterns
# export NCCL_MAX_NCHANNELS=2
# export NCCL_MIN_NCHANNELS=2


# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA
# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport
# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport
# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds
# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well

# Force SHM
# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode
# export NCCL_SOCKET_NTHREADS=1
# export FI_PROVIDER="tcp"

# Print GPU topology information
if [ -z "${SALLOC_MODE}" ]; then
    echo "=== GPU Topology ==="
    nvidia-smi topo -m
    echo "=================="
    export SRUN_ALLOC_ARGS=""
else
    export JOBNAME="bench_1.34G_dp4_tp1_pp2_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k"
    export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out"
    export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME"
fi


# Print some debugging information
echo "Master node: $MASTER_NODE"
echo "All nodes: $NODELIST"
echo "World size: $WORLD_SIZE"

# Launch the training script using srun in background
if [ -n "${SALLOC_MODE}" ]; then # srun mode
    srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
        --nnodes=$NNODES \
        --nproc_per_node=$GPUS_PER_NODE \
        --rdzv_id=$SLURM_JOB_ID \
        --rdzv_backend=c10d \
        --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
        --max_restarts 0 \
        --rdzv_conf timeout=60 \
        /fsx/nouamane/projects/nanotron/run_train.py \
        --config-file benchmark/configs/config_1.34G_dp4_tp1_pp2_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 &
    # Store the process ID
    SRUN_PID=$!
    echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE

    # Optionally, you can add:
    echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE
    echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE

else # sbatch mode
    srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
        --nnodes=$NNODES \
        --nproc_per_node=$GPUS_PER_NODE \
        --rdzv_id=$SLURM_JOB_ID \
        --rdzv_backend=c10d \
        --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
        --max_restarts 0 \
        --rdzv_conf timeout=60 \
        /fsx/nouamane/projects/nanotron/run_train.py \
        --config-file benchmark/configs/config_1.34G_dp4_tp1_pp2_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml
fi
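Unlike the 68-line scripts, this longer variant supports two launch modes: submitted with sbatch it behaves like the others and logs to the #SBATCH -o file, while in salloc mode it reconstructs SLURM_JOB_ID and the node list from SALLOC_JOBID, runs srun/torchrun in the background, and tees output to OUTPUT_FILE. A minimal sketch of the salloc-mode invocation, following the script's own usage comment; the job id is simply the example value from that comment, not a real allocation.

# Illustrative salloc-mode launch from the repo root (13482276 is the example
# job id taken from the script's own comment; replace it with your allocation).
SALLOC_JOBID=13482276 NNODES=1 bash scripts/run_1.34G_dp4_tp1_pp2_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh
# Output is then tee'd to the OUTPUT_FILE path the script constructs under /fsx/nouamane/projects/nanotron/logs/.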
scripts/run_1.34G_dp4_tp1_pp2_acc8_mbs64_seq2048_zero1_tpmodeRED_vocab131k.sh
ADDED
@@ -0,0 +1,68 @@
#!/bin/bash

#SBATCH --job-name=bench_1.34G_dp4_tp1_pp2_acc8_mbs64_seq2048_zero1_tpmodeRED_vocab131k # Job name
#SBATCH --time=00:02:00
#SBATCH --partition=hopper-prod
#SBATCH --qos=high

#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out

#SBATCH --nodes=1 # Number of nodes (modify as needed)
#SBATCH --ntasks-per-node=1 # Number of tasks per node
#SBATCH --cpus-per-task=60 # CPU cores per task
#SBATCH --gres=gpu:8 # Number of GPUs per node
#SBATCH --exclusive # Exclusive use of nodes

set -x -e

# Load any necessary modules for your system
source /etc/profile.d/modules.sh # for some reason module isn't loaded
module load cuda/12.1

# Activate your conda environment if needed
source /fsx/nouamane/miniconda/bin/activate
conda activate 2-1-cu121
export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH

# Get the node names from SLURM
export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
export MASTER_PORT=12356

# Calculate total number of processes
export NNODES=$SLURM_NNODES
export GPUS_PER_NODE=8
export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))

# Set some environment variables for better distributed training
export CUDA_DEVICE_MAX_CONNECTIONS=1
# export NCCL_DEBUG=INFO

# Nanotron specific
export NANOTRON_BENCHMARK=1

# # Disable EFA by changing the provider to tcp
# export FI_PROVIDER=tcp

# # Optionally, you can also unset these EFA-related variables
# unset FI_EFA_FORK_SAFE
# unset FI_EFA_ENABLE_SHM_TRANSFER

# # If you want to ensure NCCL uses TCP
# export NCCL_IB_DISABLE=1
# export NCCL_SOCKET_IFNAME=eth0

# Print some debugging information
echo "Master node: $MASTER_NODE"
echo "All nodes: $NODELIST"
echo "World size: $WORLD_SIZE"

# Launch the training script using srun
srun torchrun \
    --nnodes=$NNODES \
    --nproc_per_node=$GPUS_PER_NODE \
    --rdzv_id=$SLURM_JOB_ID \
    --rdzv_backend=c10d \
    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
    run_train.py \
    --config-file benchmark/configs/config_1.34G_dp4_tp1_pp2_acc8_mbs64_seq2048_zero1_tpmodeRED_vocab131k.yaml
scripts/run_1.34G_dp4_tp2_pp1_acc2_mbs16_seq8192_zero1_tpmodeRED_vocab131k.sh
ADDED
@@ -0,0 +1,68 @@
#!/bin/bash

#SBATCH --job-name=bench_1.34G_dp4_tp2_pp1_acc2_mbs16_seq8192_zero1_tpmodeRED_vocab131k # Job name
#SBATCH --time=00:02:00
#SBATCH --partition=hopper-prod
#SBATCH --qos=high

#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out

#SBATCH --nodes=1 # Number of nodes (modify as needed)
#SBATCH --ntasks-per-node=1 # Number of tasks per node
#SBATCH --cpus-per-task=60 # CPU cores per task
#SBATCH --gres=gpu:8 # Number of GPUs per node
#SBATCH --exclusive # Exclusive use of nodes

set -x -e

# Load any necessary modules for your system
source /etc/profile.d/modules.sh # for some reason module isn't loaded
module load cuda/12.1

# Activate your conda environment if needed
source /fsx/nouamane/miniconda/bin/activate
conda activate 2-1-cu121
export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH

# Get the node names from SLURM
export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
export MASTER_PORT=12356

# Calculate total number of processes
export NNODES=$SLURM_NNODES
export GPUS_PER_NODE=8
export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))

# Set some environment variables for better distributed training
export CUDA_DEVICE_MAX_CONNECTIONS=1
# export NCCL_DEBUG=INFO

# Nanotron specific
export NANOTRON_BENCHMARK=1

# # Disable EFA by changing the provider to tcp
# export FI_PROVIDER=tcp

# # Optionally, you can also unset these EFA-related variables
# unset FI_EFA_FORK_SAFE
# unset FI_EFA_ENABLE_SHM_TRANSFER

# # If you want to ensure NCCL uses TCP
# export NCCL_IB_DISABLE=1
# export NCCL_SOCKET_IFNAME=eth0

# Print some debugging information
echo "Master node: $MASTER_NODE"
echo "All nodes: $NODELIST"
echo "World size: $WORLD_SIZE"

# Launch the training script using srun
srun torchrun \
    --nnodes=$NNODES \
    --nproc_per_node=$GPUS_PER_NODE \
    --rdzv_id=$SLURM_JOB_ID \
    --rdzv_backend=c10d \
    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
    run_train.py \
    --config-file benchmark/configs/config_1.34G_dp4_tp2_pp1_acc2_mbs16_seq8192_zero1_tpmodeRED_vocab131k.yaml
scripts/run_1.34G_dp4_tp32_pp1_acc128_mbs1_seq2048_zero1_tpmodeRED_vocab131k.sh
ADDED
@@ -0,0 +1,68 @@
#!/bin/bash

#SBATCH --job-name=bench_1.34G_dp4_tp32_pp1_acc128_mbs1_seq2048_zero1_tpmodeRED_vocab131k # Job name
#SBATCH --time=00:02:00
#SBATCH --partition=hopper-prod
#SBATCH --qos=high

#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out

#SBATCH --nodes=16 # Number of nodes (modify as needed)
#SBATCH --ntasks-per-node=1 # Number of tasks per node
#SBATCH --cpus-per-task=60 # CPU cores per task
#SBATCH --gres=gpu:8 # Number of GPUs per node
#SBATCH --exclusive # Exclusive use of nodes

set -x -e

# Load any necessary modules for your system
source /etc/profile.d/modules.sh # for some reason module isn't loaded
module load cuda/12.1

# Activate your conda environment if needed
source /fsx/nouamane/miniconda/bin/activate
conda activate 2-1-cu121
export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH

# Get the node names from SLURM
export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
export MASTER_PORT=12356

# Calculate total number of processes
export NNODES=$SLURM_NNODES
export GPUS_PER_NODE=8
export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))

# Set some environment variables for better distributed training
export CUDA_DEVICE_MAX_CONNECTIONS=1
# export NCCL_DEBUG=INFO

# Nanotron specific
export NANOTRON_BENCHMARK=1

# # Disable EFA by changing the provider to tcp
# export FI_PROVIDER=tcp

# # Optionally, you can also unset these EFA-related variables
# unset FI_EFA_FORK_SAFE
# unset FI_EFA_ENABLE_SHM_TRANSFER

# # If you want to ensure NCCL uses TCP
# export NCCL_IB_DISABLE=1
# export NCCL_SOCKET_IFNAME=eth0

# Print some debugging information
echo "Master node: $MASTER_NODE"
echo "All nodes: $NODELIST"
echo "World size: $WORLD_SIZE"

# Launch the training script using srun
srun torchrun \
    --nnodes=$NNODES \
    --nproc_per_node=$GPUS_PER_NODE \
    --rdzv_id=$SLURM_JOB_ID \
    --rdzv_backend=c10d \
    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
    run_train.py \
    --config-file benchmark/configs/config_1.34G_dp4_tp32_pp1_acc128_mbs1_seq2048_zero1_tpmodeRED_vocab131k.yaml
scripts/run_1.34G_dp4_tp4_pp2_acc64_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh
ADDED
|
@@ -0,0 +1,159 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash
|
| 2 |
+
#SBATCH --job-name=bench_1.34G_dp4_tp4_pp2_acc64_mbs1_seq4096_zero0_tpmodeRED_vocab131k # Job name
|
| 3 |
+
#SBATCH --time=00:40:00
|
| 4 |
+
#SBATCH --partition=hopper-prod
|
| 5 |
+
#SBATCH --qos=high
|
| 6 |
+
|
| 7 |
+
#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
|
| 8 |
+
|
| 9 |
+
#SBATCH --nodes=4 # Number of nodes (modify as needed)
|
| 10 |
+
#SBATCH --ntasks-per-node=1 # Number of tasks per node
|
| 11 |
+
#SBATCH --cpus-per-task=60 # CPU cores per task
|
| 12 |
+
#SBATCH --gres=gpu:8 # Number of GPUs per node
|
| 13 |
+
#SBATCH --exclusive # Exclusive use of nodes
|
| 14 |
+
#SBATCH --wait-all-nodes=1 # fail if any node is not ready
|
| 15 |
+
|
| 16 |
+
# run using
|
| 17 |
+
# sbatch --nodes=1 run_multinode.sh
|
| 18 |
+
# or
|
| 19 |
+
# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh
|
| 20 |
+
|
| 21 |
+
set -x -e
|
| 22 |
+
|
| 23 |
+
# If not running under SLURM, set default SLURM environment variables
|
| 24 |
+
if [ -z "${SLURM_JOB_ID}" ]; then
|
| 25 |
+
if [ -z "${SALLOC_JOBID}" ]; then
|
| 26 |
+
echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session."
|
| 27 |
+
exit 1
|
| 28 |
+
fi
|
| 29 |
+
if [ -z "${NNODES}" ]; then
|
| 30 |
+
echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session."
|
| 31 |
+
exit 1
|
| 32 |
+
fi
|
| 33 |
+
export SALLOC_MODE=1
|
| 34 |
+
export SLURM_JOB_ID=$SALLOC_JOBID
|
| 35 |
+
export SLURM_NNODES=$NNODES
|
| 36 |
+
export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N")
|
| 37 |
+
fi
|
| 38 |
+
|
| 39 |
+
# Load any necessary modules for your system
|
| 40 |
+
source /etc/profile.d/modules.sh # for some reason module isn't loaded
|
| 41 |
+
module load cuda/12.1
|
| 42 |
+
# Unset FI_PROVIDER to avoid potential libfabric provider issues
|
| 43 |
+
# unset FI_PROVIDER


# Activate your conda environment if needed
source /fsx/nouamane/miniconda/bin/activate
conda activate 2-1-cu121
export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH

# Get the node names from SLURM
if [ -z "${SALLOC_MODE}" ]; then # sbatch mode
    export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`

else # srun mode
    export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES`
fi
export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
export MASTER_PORT=12356

# Calculate total number of processes
export NNODES=$SLURM_NNODES
export GPUS_PER_NODE=8
export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))

# Set some environment variables for better distributed training
export CUDA_DEVICE_MAX_CONNECTIONS=1
export NCCL_DEBUG=WARN # INFO, WARN
# export NCCL_DEBUG_SUBSYS=ALL
# export CUDA_LAUNCH_BLOCKING=1

# Nanotron specific
export NANOTRON_BENCHMARK=1
export WANDB_MODE=disabled

# export TORCH_NCCL_USE_COMM_NONBLOCKING=1

# Trying to avoid hangs
export TORCH_NCCL_ASYNC_ERROR_HANDLING=1

# debug
export TORCH_DISTRIBUTED_DEBUG=DETAIL

# export NCCL_P2P_LEVEL=NVL
# export CUDA_LAUNCH_BLOCKING=1
# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA
# export NCCL_NET_GDR_LEVEL=LOC
# Test Script - save as test_comm.sh

# Test 1 - Force TCP
# echo "Running with TCP only..."
# export NCCL_P2P_LEVEL=LOC

# # Match bandwidth patterns
# export NCCL_MAX_NCHANNELS=2
# export NCCL_MIN_NCHANNELS=2


# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA
# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport
# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport
# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds
# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well

# Force SHM
# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode
# export NCCL_SOCKET_NTHREADS=1
# export FI_PROVIDER="tcp"

# Print GPU topology information
if [ -z "${SALLOC_MODE}" ]; then
    echo "=== GPU Topology ==="
    nvidia-smi topo -m
    echo "=================="
    export SRUN_ALLOC_ARGS=""
else
    export JOBNAME="bench_1.34G_dp4_tp4_pp2_acc64_mbs1_seq4096_zero0_tpmodeRED_vocab131k"
    export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out"
    export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME"
fi


# Print some debugging information
echo "Master node: $MASTER_NODE"
echo "All nodes: $NODELIST"
echo "World size: $WORLD_SIZE"

# Launch the training script using srun in background
if [ -n "${SALLOC_MODE}" ]; then # srun mode
    srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
        --nnodes=$NNODES \
        --nproc_per_node=$GPUS_PER_NODE \
        --rdzv_id=$SLURM_JOB_ID \
        --rdzv_backend=c10d \
        --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
        --max_restarts 0 \
        --rdzv_conf timeout=60 \
        /fsx/nouamane/projects/nanotron/run_train.py \
        --config-file benchmark/configs/config_1.34G_dp4_tp4_pp2_acc64_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 &
    # Store the process ID
    SRUN_PID=$!
    echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE

    # Optionally, you can add:
    echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE
    echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE

else # sbatch mode
    srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
        --nnodes=$NNODES \
        --nproc_per_node=$GPUS_PER_NODE \
        --rdzv_id=$SLURM_JOB_ID \
        --rdzv_backend=c10d \
        --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
        --max_restarts 0 \
        --rdzv_conf timeout=60 \
        /fsx/nouamane/projects/nanotron/run_train.py \
        --config-file benchmark/configs/config_1.34G_dp4_tp4_pp2_acc64_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml
fi
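Note: in the salloc branch above, the srun/torchrun command is backgrounded, so the script only reports a PID and a log path. A minimal follow-up sketch, assuming the $OUTPUT_FILE and $SRUN_PID variables set by the script (the tail command is illustrative and not part of the generated script):

# Follow the benchmark log written by the backgrounded srun
tail -f "$OUTPUT_FILE"
# Check whether the launcher is still running, or stop it (as the script's own echoes suggest)
ps -p "$SRUN_PID"
kill "$SRUN_PID"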
scripts/run_1.34G_dp4_tp8_pp1_acc1_mbs128_seq8192_zero1_tpmodeALL_vocab131k.sh
ADDED
@@ -0,0 +1,68 @@
#!/bin/bash

#SBATCH --job-name=bench_1.34G_dp4_tp8_pp1_acc1_mbs128_seq8192_zero1_tpmodeALL_vocab131k # Job name
#SBATCH --time=00:02:00
#SBATCH --partition=hopper-prod
#SBATCH --qos=high

#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out

#SBATCH --nodes=4 # Number of nodes (modify as needed)
#SBATCH --ntasks-per-node=1 # Number of tasks per node
#SBATCH --cpus-per-task=60 # CPU cores per task
#SBATCH --gres=gpu:8 # Number of GPUs per node
#SBATCH --exclusive # Exclusive use of nodes

set -x -e

# Load any necessary modules for your system
source /etc/profile.d/modules.sh # for some reason module isn't loaded
module load cuda/12.1

# Activate your conda environment if needed
source /fsx/nouamane/miniconda/bin/activate
conda activate 2-1-cu121
export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH

# Get the node names from SLURM
export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
export MASTER_PORT=12356

# Calculate total number of processes
export NNODES=$SLURM_NNODES
export GPUS_PER_NODE=8
export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))

# Set some environment variables for better distributed training
export CUDA_DEVICE_MAX_CONNECTIONS=1
# export NCCL_DEBUG=INFO

# Nanotron specific
export NANOTRON_BENCHMARK=1

# # Disable EFA by changing the provider to tcp
# export FI_PROVIDER=tcp

# # Optionally, you can also unset these EFA-related variables
# unset FI_EFA_FORK_SAFE
# unset FI_EFA_ENABLE_SHM_TRANSFER

# # If you want to ensure NCCL uses TCP
# export NCCL_IB_DISABLE=1
# export NCCL_SOCKET_IFNAME=eth0

# Print some debugging information
echo "Master node: $MASTER_NODE"
echo "All nodes: $NODELIST"
echo "World size: $WORLD_SIZE"

# Launch the training script using srun
srun torchrun \
    --nnodes=$NNODES \
    --nproc_per_node=$GPUS_PER_NODE \
    --rdzv_id=$SLURM_JOB_ID \
    --rdzv_backend=c10d \
    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
    run_train.py \
    --config-file benchmark/configs/config_1.34G_dp4_tp8_pp1_acc1_mbs128_seq8192_zero1_tpmodeALL_vocab131k.yaml
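The Slurm node count in each of these scripts follows from the parallelism degrees encoded in the file name: dp x tp x pp GPUs in total, divided by the 8 GPUs per node. A quick sanity-check sketch for the script above (the dp/tp/pp values are read off the file name; the helper itself is illustrative and not part of the repository):

# dp4_tp8_pp1 -> 4 * 8 * 1 = 32 GPUs; at 8 GPUs per node that is 4 nodes,
# matching "#SBATCH --nodes=4" above.
dp=4 tp=8 pp=1 gpus_per_node=8
total_gpus=$((dp * tp * pp))
echo "nodes needed: $((total_gpus / gpus_per_node))"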
scripts/run_1.34G_dp8_tp16_pp1_acc32_mbs2_seq8192_zero1_tpmodeALL_vocab131k.sh
ADDED
@@ -0,0 +1,68 @@
#!/bin/bash

#SBATCH --job-name=bench_1.34G_dp8_tp16_pp1_acc32_mbs2_seq8192_zero1_tpmodeALL_vocab131k # Job name
#SBATCH --time=00:02:00
#SBATCH --partition=hopper-prod
#SBATCH --qos=high

#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out

#SBATCH --nodes=16 # Number of nodes (modify as needed)
#SBATCH --ntasks-per-node=1 # Number of tasks per node
#SBATCH --cpus-per-task=60 # CPU cores per task
#SBATCH --gres=gpu:8 # Number of GPUs per node
#SBATCH --exclusive # Exclusive use of nodes

set -x -e

# Load any necessary modules for your system
source /etc/profile.d/modules.sh # for some reason module isn't loaded
module load cuda/12.1

# Activate your conda environment if needed
source /fsx/nouamane/miniconda/bin/activate
conda activate 2-1-cu121
export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH

# Get the node names from SLURM
export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
export MASTER_PORT=12356

# Calculate total number of processes
export NNODES=$SLURM_NNODES
export GPUS_PER_NODE=8
export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))

# Set some environment variables for better distributed training
export CUDA_DEVICE_MAX_CONNECTIONS=1
# export NCCL_DEBUG=INFO

# Nanotron specific
export NANOTRON_BENCHMARK=1

# # Disable EFA by changing the provider to tcp
# export FI_PROVIDER=tcp

# # Optionally, you can also unset these EFA-related variables
# unset FI_EFA_FORK_SAFE
# unset FI_EFA_ENABLE_SHM_TRANSFER

# # If you want to ensure NCCL uses TCP
# export NCCL_IB_DISABLE=1
# export NCCL_SOCKET_IFNAME=eth0

# Print some debugging information
echo "Master node: $MASTER_NODE"
echo "All nodes: $NODELIST"
echo "World size: $WORLD_SIZE"

# Launch the training script using srun
srun torchrun \
    --nnodes=$NNODES \
    --nproc_per_node=$GPUS_PER_NODE \
    --rdzv_id=$SLURM_JOB_ID \
    --rdzv_backend=c10d \
    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
    run_train.py \
    --config-file benchmark/configs/config_1.34G_dp8_tp16_pp1_acc32_mbs2_seq8192_zero1_tpmodeALL_vocab131k.yaml
scripts/run_1.34G_dp8_tp1_pp2_acc16_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh
ADDED
@@ -0,0 +1,159 @@
#!/bin/bash
#SBATCH --job-name=bench_1.34G_dp8_tp1_pp2_acc16_mbs2_seq4096_zero1_tpmodeRED_vocab131k # Job name
#SBATCH --time=01:10:00
#SBATCH --partition=hopper-prod
#SBATCH --qos=high

#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out

#SBATCH --nodes=2 # Number of nodes (modify as needed)
#SBATCH --ntasks-per-node=1 # Number of tasks per node
#SBATCH --cpus-per-task=60 # CPU cores per task
#SBATCH --gres=gpu:8 # Number of GPUs per node
#SBATCH --exclusive # Exclusive use of nodes
#SBATCH --wait-all-nodes=1 # fail if any node is not ready

# run using
# sbatch --nodes=1 run_multinode.sh
# or
# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh

set -x -e

# If not running under SLURM, set default SLURM environment variables
if [ -z "${SLURM_JOB_ID}" ]; then
    if [ -z "${SALLOC_JOBID}" ]; then
        echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session."
        exit 1
    fi
    if [ -z "${NNODES}" ]; then
        echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session."
        exit 1
    fi
    export SALLOC_MODE=1
    export SLURM_JOB_ID=$SALLOC_JOBID
    export SLURM_NNODES=$NNODES
    export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N")
fi

# Load any necessary modules for your system
source /etc/profile.d/modules.sh # for some reason module isn't loaded
module load cuda/12.1
# Unset FI_PROVIDER to avoid potential libfabric provider issues
# unset FI_PROVIDER


# Activate your conda environment if needed
source /fsx/nouamane/miniconda/bin/activate
conda activate 2-1-cu121
export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH

# Get the node names from SLURM
if [ -z "${SALLOC_MODE}" ]; then # sbatch mode
    export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`

else # srun mode
    export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES`
fi
export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
export MASTER_PORT=12356

# Calculate total number of processes
export NNODES=$SLURM_NNODES
export GPUS_PER_NODE=8
export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))

# Set some environment variables for better distributed training
export CUDA_DEVICE_MAX_CONNECTIONS=1
export NCCL_DEBUG=INFO # INFO, WARN
# export NCCL_DEBUG_SUBSYS=ALL
# export CUDA_LAUNCH_BLOCKING=1

# Nanotron specific
export NANOTRON_BENCHMARK=1
export WANDB_MODE=disabled

# export TORCH_NCCL_USE_COMM_NONBLOCKING=1

# Trying to avoid hangs
export TORCH_NCCL_ASYNC_ERROR_HANDLING=1

# debug
export TORCH_DISTRIBUTED_DEBUG=DETAIL

# export NCCL_P2P_LEVEL=NVL
# export CUDA_LAUNCH_BLOCKING=1
# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA
# export NCCL_NET_GDR_LEVEL=LOC
# Test Script - save as test_comm.sh

# Test 1 - Force TCP
# echo "Running with TCP only..."
# export NCCL_P2P_LEVEL=LOC

# # Match bandwidth patterns
# export NCCL_MAX_NCHANNELS=2
# export NCCL_MIN_NCHANNELS=2


# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA
# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport
# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport
# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds
# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well

# Force SHM
# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode
# export NCCL_SOCKET_NTHREADS=1
# export FI_PROVIDER="tcp"

# Print GPU topology information
if [ -z "${SALLOC_MODE}" ]; then
    echo "=== GPU Topology ==="
    nvidia-smi topo -m
    echo "=================="
    export SRUN_ALLOC_ARGS=""
else
    export JOBNAME="bench_1.34G_dp8_tp1_pp2_acc16_mbs2_seq4096_zero1_tpmodeRED_vocab131k"
    export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out"
    export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME"
fi


# Print some debugging information
echo "Master node: $MASTER_NODE"
echo "All nodes: $NODELIST"
echo "World size: $WORLD_SIZE"

# Launch the training script using srun in background
if [ -n "${SALLOC_MODE}" ]; then # srun mode
    srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
        --nnodes=$NNODES \
        --nproc_per_node=$GPUS_PER_NODE \
        --rdzv_id=$SLURM_JOB_ID \
        --rdzv_backend=c10d \
        --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
        --max_restarts 0 \
        --rdzv_conf timeout=60 \
        /fsx/nouamane/projects/nanotron/run_train.py \
        --config-file benchmark/configs/config_1.34G_dp8_tp1_pp2_acc16_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 &
    # Store the process ID
    SRUN_PID=$!
    echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE

    # Optionally, you can add:
    echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE
    echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE

else # sbatch mode
    srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
        --nnodes=$NNODES \
        --nproc_per_node=$GPUS_PER_NODE \
        --rdzv_id=$SLURM_JOB_ID \
        --rdzv_backend=c10d \
        --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
        --max_restarts 0 \
        --rdzv_conf timeout=60 \
        /fsx/nouamane/projects/nanotron/run_train.py \
        --config-file benchmark/configs/config_1.34G_dp8_tp1_pp2_acc16_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml
fi
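As its header comments indicate, this longer variant can be submitted as a batch job or reused inside an existing salloc allocation. A hedged usage sketch (the allocation ID is a placeholder to fill in; NNODES=2 matches the "#SBATCH --nodes=2" header above):

# sbatch mode: Slurm schedules the nodes requested in the header
sbatch scripts/run_1.34G_dp8_tp1_pp2_acc16_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh
# salloc mode: reuse an existing allocation; SALLOC_JOBID and NNODES are required
SALLOC_JOBID=<your_allocation_id> NNODES=2 bash scripts/run_1.34G_dp8_tp1_pp2_acc16_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh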
scripts/run_1.34G_dp8_tp1_pp2_acc4_mbs16_seq2048_zero1_tpmodeRED_vocab131k.sh
ADDED
@@ -0,0 +1,68 @@
#!/bin/bash

#SBATCH --job-name=bench_1.34G_dp8_tp1_pp2_acc4_mbs16_seq2048_zero1_tpmodeRED_vocab131k # Job name
#SBATCH --time=00:02:00
#SBATCH --partition=hopper-prod
#SBATCH --qos=high

#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out

#SBATCH --nodes=2 # Number of nodes (modify as needed)
#SBATCH --ntasks-per-node=1 # Number of tasks per node
#SBATCH --cpus-per-task=60 # CPU cores per task
#SBATCH --gres=gpu:8 # Number of GPUs per node
#SBATCH --exclusive # Exclusive use of nodes

set -x -e

# Load any necessary modules for your system
source /etc/profile.d/modules.sh # for some reason module isn't loaded
module load cuda/12.1

# Activate your conda environment if needed
source /fsx/nouamane/miniconda/bin/activate
conda activate 2-1-cu121
export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH

# Get the node names from SLURM
export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
export MASTER_PORT=12356

# Calculate total number of processes
export NNODES=$SLURM_NNODES
export GPUS_PER_NODE=8
export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))

# Set some environment variables for better distributed training
export CUDA_DEVICE_MAX_CONNECTIONS=1
# export NCCL_DEBUG=INFO

# Nanotron specific
export NANOTRON_BENCHMARK=1

# # Disable EFA by changing the provider to tcp
# export FI_PROVIDER=tcp

# # Optionally, you can also unset these EFA-related variables
# unset FI_EFA_FORK_SAFE
# unset FI_EFA_ENABLE_SHM_TRANSFER

# # If you want to ensure NCCL uses TCP
# export NCCL_IB_DISABLE=1
# export NCCL_SOCKET_IFNAME=eth0

# Print some debugging information
echo "Master node: $MASTER_NODE"
echo "All nodes: $NODELIST"
echo "World size: $WORLD_SIZE"

# Launch the training script using srun
srun torchrun \
    --nnodes=$NNODES \
    --nproc_per_node=$GPUS_PER_NODE \
    --rdzv_id=$SLURM_JOB_ID \
    --rdzv_backend=c10d \
    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
    run_train.py \
    --config-file benchmark/configs/config_1.34G_dp8_tp1_pp2_acc4_mbs16_seq2048_zero1_tpmodeRED_vocab131k.yaml