diff --git a/scripts/run_1.14G_dp128_tp4_pp1_acc8_mbs2_seq2048_zero1_tpmodeRED_vocab32k.sh b/scripts/run_1.14G_dp128_tp4_pp1_acc8_mbs2_seq2048_zero1_tpmodeRED_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..a7a3076a5fcd5f32936c20f125db737d87d11277 --- /dev/null +++ b/scripts/run_1.14G_dp128_tp4_pp1_acc8_mbs2_seq2048_zero1_tpmodeRED_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp128_tp4_pp1_acc8_mbs2_seq2048_zero1_tpmodeRED_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp128_tp4_pp1_acc8_mbs2_seq2048_zero1_tpmodeRED_vocab32k.yaml diff --git a/scripts/run_1.14G_dp16_tp16_pp2_acc1_mbs1_seq2048_zero0_tpmodeRED_l16_h2048_heads32.sh b/scripts/run_1.14G_dp16_tp16_pp2_acc1_mbs1_seq2048_zero0_tpmodeRED_l16_h2048_heads32.sh new file mode 100644 index 0000000000000000000000000000000000000000..b060ff7aca43608ebcff981823dbdbc2ff4df01f --- /dev/null +++ b/scripts/run_1.14G_dp16_tp16_pp2_acc1_mbs1_seq2048_zero0_tpmodeRED_l16_h2048_heads32.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp16_tp16_pp2_acc1_mbs1_seq2048_zero0_tpmodeRED_l16_h2048_heads32 # Job name +#SBATCH --time=00:15:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your 
system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp16_tp16_pp2_acc1_mbs1_seq2048_zero0_tpmodeRED_l16_h2048_heads32.yaml diff --git a/scripts/run_1.14G_dp16_tp2_pp1_acc4_mbs8_seq2048_zero1_tpmodeRED_vocab32k.sh b/scripts/run_1.14G_dp16_tp2_pp1_acc4_mbs8_seq2048_zero1_tpmodeRED_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..c0061ac0b4635aa395953bd3d8d593b30a71a39f --- /dev/null +++ b/scripts/run_1.14G_dp16_tp2_pp1_acc4_mbs8_seq2048_zero1_tpmodeRED_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp16_tp2_pp1_acc4_mbs8_seq2048_zero1_tpmodeRED_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=4 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset 
FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp16_tp2_pp1_acc4_mbs8_seq2048_zero1_tpmodeRED_vocab32k.yaml diff --git a/scripts/run_1.14G_dp16_tp8_pp1_acc16_mbs8_seq2048_zero1_tpmodeALL_vocab32k.sh b/scripts/run_1.14G_dp16_tp8_pp1_acc16_mbs8_seq2048_zero1_tpmodeALL_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..8f453b9d589b87e29f537c71dda0fd2c08b6a61c --- /dev/null +++ b/scripts/run_1.14G_dp16_tp8_pp1_acc16_mbs8_seq2048_zero1_tpmodeALL_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp16_tp8_pp1_acc16_mbs8_seq2048_zero1_tpmodeALL_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp16_tp8_pp1_acc16_mbs8_seq2048_zero1_tpmodeALL_vocab32k.yaml diff --git a/scripts/run_1.14G_dp1_tp8_pp1_acc1_mbs128_seq2048_zero0_tpmodeRED_vocab32k_prof.sh b/scripts/run_1.14G_dp1_tp8_pp1_acc1_mbs128_seq2048_zero0_tpmodeRED_vocab32k_prof.sh new file mode 100644 index 0000000000000000000000000000000000000000..c8ce7e25557a9004dd940e557f648b8b47f8825b --- /dev/null +++ b/scripts/run_1.14G_dp1_tp8_pp1_acc1_mbs128_seq2048_zero0_tpmodeRED_vocab32k_prof.sh 
@@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp1_tp8_pp1_acc1_mbs128_seq2048_zero0_tpmodeRED_vocab32k_prof # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=1 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp1_tp8_pp1_acc1_mbs128_seq2048_zero0_tpmodeRED_vocab32k_prof.yaml diff --git a/scripts/run_1.14G_dp1_tp8_pp2_acc1_mbs64_seq2048_zero0_tpmodeRED_vocab32k_prof.sh b/scripts/run_1.14G_dp1_tp8_pp2_acc1_mbs64_seq2048_zero0_tpmodeRED_vocab32k_prof.sh new file mode 100644 index 0000000000000000000000000000000000000000..3ef5b8726fc6a73df4b50f328603f10ff4603483 --- /dev/null +++ b/scripts/run_1.14G_dp1_tp8_pp2_acc1_mbs64_seq2048_zero0_tpmodeRED_vocab32k_prof.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp1_tp8_pp2_acc1_mbs64_seq2048_zero0_tpmodeRED_vocab32k_prof # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=2 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export 
MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp1_tp8_pp2_acc1_mbs64_seq2048_zero0_tpmodeRED_vocab32k_prof.yaml diff --git a/scripts/run_1.14G_dp2_tp16_pp1_acc16_mbs4_seq32768_zero1_tpmodeALL_vocab32k.sh b/scripts/run_1.14G_dp2_tp16_pp1_acc16_mbs4_seq32768_zero1_tpmodeALL_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..9db853076b6ccdb4b624257ad0030b13e8556e10 --- /dev/null +++ b/scripts/run_1.14G_dp2_tp16_pp1_acc16_mbs4_seq32768_zero1_tpmodeALL_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp2_tp16_pp1_acc16_mbs4_seq32768_zero1_tpmodeALL_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=4 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + 
--rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp2_tp16_pp1_acc16_mbs4_seq32768_zero1_tpmodeALL_vocab32k.yaml diff --git a/scripts/run_1.14G_dp2_tp16_pp1_acc4_mbs64_seq8192_zero1_tpmodeRED_vocab32k.sh b/scripts/run_1.14G_dp2_tp16_pp1_acc4_mbs64_seq8192_zero1_tpmodeRED_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..adb13e133472ffbc0fed11db32d589f45f00ab1f --- /dev/null +++ b/scripts/run_1.14G_dp2_tp16_pp1_acc4_mbs64_seq8192_zero1_tpmodeRED_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp2_tp16_pp1_acc4_mbs64_seq8192_zero1_tpmodeRED_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=4 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp2_tp16_pp1_acc4_mbs64_seq8192_zero1_tpmodeRED_vocab32k.yaml diff --git a/scripts/run_1.14G_dp2_tp8_pp1_acc128_mbs2_seq8192_zero1_tpmodeALL_vocab32k.sh b/scripts/run_1.14G_dp2_tp8_pp1_acc128_mbs2_seq8192_zero1_tpmodeALL_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..0ec7de355024bfe3183beb1cc23c034b389bd818 --- /dev/null +++ b/scripts/run_1.14G_dp2_tp8_pp1_acc128_mbs2_seq8192_zero1_tpmodeALL_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp2_tp8_pp1_acc128_mbs2_seq8192_zero1_tpmodeALL_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=2 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH 
--cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp2_tp8_pp1_acc128_mbs2_seq8192_zero1_tpmodeALL_vocab32k.yaml diff --git a/scripts/run_1.14G_dp32_tp16_pp1_acc4_mbs4_seq8192_zero1_tpmodeRED_vocab32k.sh b/scripts/run_1.14G_dp32_tp16_pp1_acc4_mbs4_seq8192_zero1_tpmodeRED_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..1bbc0d841b33ee6d318bdfff3d0fda54b9a3a453 --- /dev/null +++ b/scripts/run_1.14G_dp32_tp16_pp1_acc4_mbs4_seq8192_zero1_tpmodeRED_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp32_tp16_pp1_acc4_mbs4_seq8192_zero1_tpmodeRED_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 
+ +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp32_tp16_pp1_acc4_mbs4_seq8192_zero1_tpmodeRED_vocab32k.yaml diff --git a/scripts/run_1.14G_dp4_tp128_pp1_acc128_mbs1_seq2048_zero1_tpmodeRED_vocab32k.sh b/scripts/run_1.14G_dp4_tp128_pp1_acc128_mbs1_seq2048_zero1_tpmodeRED_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..c4aecefba956c6d7c98267652a5e0da2e68eb9ef --- /dev/null +++ b/scripts/run_1.14G_dp4_tp128_pp1_acc128_mbs1_seq2048_zero1_tpmodeRED_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp4_tp128_pp1_acc128_mbs1_seq2048_zero1_tpmodeRED_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp4_tp128_pp1_acc128_mbs1_seq2048_zero1_tpmodeRED_vocab32k.yaml diff --git a/scripts/run_1.14G_dp4_tp32_pp1_acc1_mbs8_seq32768_zero1_tpmodeRED_vocab32k.sh b/scripts/run_1.14G_dp4_tp32_pp1_acc1_mbs8_seq32768_zero1_tpmodeRED_vocab32k.sh new file mode 100644 index 
0000000000000000000000000000000000000000..157d585f8f575e6bd2a1a0cb06c518247df608be --- /dev/null +++ b/scripts/run_1.14G_dp4_tp32_pp1_acc1_mbs8_seq32768_zero1_tpmodeRED_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp4_tp32_pp1_acc1_mbs8_seq32768_zero1_tpmodeRED_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp4_tp32_pp1_acc1_mbs8_seq32768_zero1_tpmodeRED_vocab32k.yaml diff --git a/scripts/run_1.14G_dp4_tp32_pp1_acc32_mbs1_seq32768_zero1_tpmodeALL_vocab32k.sh b/scripts/run_1.14G_dp4_tp32_pp1_acc32_mbs1_seq32768_zero1_tpmodeALL_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..ca4f09afaba3a68cb1a80e1beed3468631540314 --- /dev/null +++ b/scripts/run_1.14G_dp4_tp32_pp1_acc32_mbs1_seq32768_zero1_tpmodeALL_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp4_tp32_pp1_acc32_mbs1_seq32768_zero1_tpmodeALL_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export 
PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp4_tp32_pp1_acc32_mbs1_seq32768_zero1_tpmodeALL_vocab32k.yaml diff --git a/scripts/run_1.14G_dp4_tp4_pp1_acc8_mbs16_seq8192_zero1_tpmodeALL_vocab32k.sh b/scripts/run_1.14G_dp4_tp4_pp1_acc8_mbs16_seq8192_zero1_tpmodeALL_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..42c40b4206f9497973b06cddf2592ba7562b1b17 --- /dev/null +++ b/scripts/run_1.14G_dp4_tp4_pp1_acc8_mbs16_seq8192_zero1_tpmodeALL_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp4_tp4_pp1_acc8_mbs16_seq8192_zero1_tpmodeALL_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=2 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo 
"World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp4_tp4_pp1_acc8_mbs16_seq8192_zero1_tpmodeALL_vocab32k.yaml diff --git a/scripts/run_1.14G_dp64_tp8_pp1_acc4_mbs2_seq8192_zero1_tpmodeRED_vocab32k.sh b/scripts/run_1.14G_dp64_tp8_pp1_acc4_mbs2_seq8192_zero1_tpmodeRED_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..792f64b6d6019d4bba1c262d7868e380707a3170 --- /dev/null +++ b/scripts/run_1.14G_dp64_tp8_pp1_acc4_mbs2_seq8192_zero1_tpmodeRED_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp64_tp8_pp1_acc4_mbs2_seq8192_zero1_tpmodeRED_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp64_tp8_pp1_acc4_mbs2_seq8192_zero1_tpmodeRED_vocab32k.yaml diff --git a/scripts/run_1.14G_dp8_tp16_pp1_acc16_mbs1_seq32768_zero1_tpmodeRED_vocab32k.sh b/scripts/run_1.14G_dp8_tp16_pp1_acc16_mbs1_seq32768_zero1_tpmodeRED_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..29968782920c78ae4eee378f390781f658ffb818 --- /dev/null +++ b/scripts/run_1.14G_dp8_tp16_pp1_acc16_mbs1_seq32768_zero1_tpmodeRED_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp8_tp16_pp1_acc16_mbs1_seq32768_zero1_tpmodeRED_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o 
/fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp8_tp16_pp1_acc16_mbs1_seq32768_zero1_tpmodeRED_vocab32k.yaml diff --git a/scripts/run_1.14G_dp8_tp16_pp1_acc1_mbs16_seq32768_zero1_tpmodeRED_vocab32k.sh b/scripts/run_1.14G_dp8_tp16_pp1_acc1_mbs16_seq32768_zero1_tpmodeRED_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..84a52aefd94fa2350182b7528e46b0625c1b7bea --- /dev/null +++ b/scripts/run_1.14G_dp8_tp16_pp1_acc1_mbs16_seq32768_zero1_tpmodeRED_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp8_tp16_pp1_acc1_mbs16_seq32768_zero1_tpmodeRED_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set 
some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp8_tp16_pp1_acc1_mbs16_seq32768_zero1_tpmodeRED_vocab32k.yaml diff --git a/scripts/run_1.14G_dp8_tp4_pp1_acc4_mbs4_seq8192_zero1_tpmodeRED_vocab32k.sh b/scripts/run_1.14G_dp8_tp4_pp1_acc4_mbs4_seq8192_zero1_tpmodeRED_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..9aad48b0e7b4a24348bb77eba2636b2a7bac0264 --- /dev/null +++ b/scripts/run_1.14G_dp8_tp4_pp1_acc4_mbs4_seq8192_zero1_tpmodeRED_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp8_tp4_pp1_acc4_mbs4_seq8192_zero1_tpmodeRED_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=4 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp8_tp4_pp1_acc4_mbs4_seq8192_zero1_tpmodeRED_vocab32k.yaml diff --git 
a/scripts/run_1.34G_dp16_tp16_pp1_acc1_mbs32_seq8192_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp16_tp16_pp1_acc1_mbs32_seq8192_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..0801a4c014ec028ab301a658084a1929fc6867d3 --- /dev/null +++ b/scripts/run_1.34G_dp16_tp16_pp1_acc1_mbs32_seq8192_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp16_tp16_pp1_acc1_mbs32_seq8192_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=32 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp16_tp16_pp1_acc1_mbs32_seq8192_zero1_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_1.34G_dp16_tp16_pp1_acc32_mbs1_seq2048_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp16_tp16_pp1_acc32_mbs1_seq2048_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..0be0df3104c08e827816cb85a09607da40559a64 --- /dev/null +++ b/scripts/run_1.34G_dp16_tp16_pp1_acc32_mbs1_seq2048_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp16_tp16_pp1_acc32_mbs1_seq2048_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=32 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source 
/etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp16_tp16_pp1_acc32_mbs1_seq2048_zero1_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_1.34G_dp16_tp1_pp2_acc32_mbs1_seq2048_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp16_tp1_pp2_acc32_mbs1_seq2048_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..c9d4ef15e7ed791c0f67fb9fa227553c476f2d05 --- /dev/null +++ b/scripts/run_1.34G_dp16_tp1_pp2_acc32_mbs1_seq2048_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp16_tp1_pp2_acc32_mbs1_seq2048_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=4 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset 
FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp16_tp1_pp2_acc32_mbs1_seq2048_zero1_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_1.34G_dp16_tp8_pp1_acc4_mbs2_seq8192_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp16_tp8_pp1_acc4_mbs2_seq8192_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..b022f657e94efc18c2c29d79e494f82057b53a22 --- /dev/null +++ b/scripts/run_1.34G_dp16_tp8_pp1_acc4_mbs2_seq8192_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp16_tp8_pp1_acc4_mbs2_seq8192_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp16_tp8_pp1_acc4_mbs2_seq8192_zero1_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_1.34G_dp1_tp16_pp2_acc8_mbs32_seq4096_zero0_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp1_tp16_pp2_acc8_mbs32_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..5533c84f2f0fc869b592c22420d1619501a35d9a --- /dev/null +++ b/scripts/run_1.34G_dp1_tp16_pp2_acc8_mbs32_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 
+1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_1.34G_dp1_tp16_pp2_acc8_mbs32_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:40:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=4 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # 1 disables the Shared Memory (SHM) transport; 0 leaves it enabled +# export NCCL_IB_DISABLE=0 # 1 disables the InfiniBand (IB) transport; 0 leaves it enabled +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds, 21 = ~8 seconds, 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesn't work multi-node +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_1.34G_dp1_tp16_pp2_acc8_mbs32_seq4096_zero0_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_1.34G_dp1_tp16_pp2_acc8_mbs32_seq4096_zero0_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$!
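+    # Note (added): a small sketch of how the stored PID could be used from this shell, assuming the background srun above is still running: +    #   tail -f "$OUTPUT_FILE"   # follow the training log +    #   wait "$SRUN_PID"         # block until the background srun exits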
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_1.34G_dp1_tp16_pp2_acc8_mbs32_seq4096_zero0_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_1.34G_dp1_tp1_pp16_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp1_tp1_pp16_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..d383b88f459e4fcc52b6519eb18e329d49a2932e --- /dev/null +++ b/scripts/run_1.34G_dp1_tp1_pp16_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,73 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp1_tp1_pp16_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high +#SBATCH --exclude=ip-26-0-160-192,ip-26-0-171-102 + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=2 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +# Disable wandb +export WANDB_MODE=disabled + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + + +# Print GPU topology information +echo "=== GPU Topology ===" +nvidia-smi topo -m +echo "==================" + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_1.34G_dp1_tp1_pp16_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_1.34G_dp1_tp4_pp32_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k.sh 
b/scripts/run_1.34G_dp1_tp4_pp32_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..f67649e1162d82f413fcf6c20ec131ce1874d14b --- /dev/null +++ b/scripts/run_1.34G_dp1_tp4_pp32_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,73 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp1_tp4_pp32_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high +#SBATCH --exclude=ip-26-0-160-192,ip-26-0-171-102 + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +# Disable wandb +export WANDB_MODE=disabled + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + + +# Print GPU topology information +echo "=== GPU Topology ===" +nvidia-smi topo -m +echo "==================" + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_1.34G_dp1_tp4_pp32_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_1.34G_dp256_tp2_pp1_acc1_mbs8_seq2048_zero1_tpmodeALL_vocab131k.sh b/scripts/run_1.34G_dp256_tp2_pp1_acc1_mbs8_seq2048_zero1_tpmodeALL_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..35166d06acf67e39dd74993be995be261b15e244 --- /dev/null +++ b/scripts/run_1.34G_dp256_tp2_pp1_acc1_mbs8_seq2048_zero1_tpmodeALL_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp256_tp2_pp1_acc1_mbs8_seq2048_zero1_tpmodeALL_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load 
any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp256_tp2_pp1_acc1_mbs8_seq2048_zero1_tpmodeALL_vocab131k.yaml diff --git a/scripts/run_1.34G_dp2_tp16_pp16_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp2_tp16_pp16_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..9a6be0b1ea934771489a774c60b126749b5a1a8e --- /dev/null +++ b/scripts/run_1.34G_dp2_tp16_pp16_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_1.34G_dp2_tp16_pp16_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:40:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # 1 disables the Shared Memory (SHM) transport; 0 leaves it enabled +# export NCCL_IB_DISABLE=0 # 1 disables the InfiniBand (IB) transport; 0 leaves it enabled +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds, 21 = ~8 seconds, 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesn't work multi-node +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_1.34G_dp2_tp16_pp16_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_1.34G_dp2_tp16_pp16_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$!
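+    # Note (added): a hedged example of inspecting the SLURM job itself rather than the local srun process; squeue/scancel act on the job ID, not on $SRUN_PID: +    #   squeue -j "$SLURM_JOB_ID"    # show the job state +    #   scancel "$SLURM_JOB_ID"      # cancel the whole allocation if needed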
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_1.34G_dp2_tp16_pp16_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_1.34G_dp2_tp256_pp1_acc256_mbs1_seq8192_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp2_tp256_pp1_acc256_mbs1_seq8192_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..40c0d4bfe796687b3d22b853999f4596bee69fda --- /dev/null +++ b/scripts/run_1.34G_dp2_tp256_pp1_acc256_mbs1_seq8192_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp2_tp256_pp1_acc256_mbs1_seq8192_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp2_tp256_pp1_acc256_mbs1_seq8192_zero1_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_1.34G_dp2_tp2_pp4_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp2_tp2_pp4_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..1c34950e6916075d745acf559f06cd15ea44adfe --- 
/dev/null +++ b/scripts/run_1.34G_dp2_tp2_pp4_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_1.34G_dp2_tp2_pp4_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:40:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=2 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # 1 disables the Shared Memory (SHM) transport; 0 leaves it enabled +# export NCCL_IB_DISABLE=0 # 1 disables the InfiniBand (IB) transport; 0 leaves it enabled +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds, 21 = ~8 seconds, 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesn't work multi-node +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_1.34G_dp2_tp2_pp4_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_1.34G_dp2_tp2_pp4_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$!
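+    # Note (added): rough sanity check of this config's layout, assuming the usual world-size rule dp * tp * pp: +    #   2 (dp) * 2 (tp) * 4 (pp) = 16 GPUs = 2 nodes * 8 GPUs/node, matching --nodes=2 above.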
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_1.34G_dp2_tp2_pp4_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_1.34G_dp2_tp32_pp1_acc64_mbs16_seq2048_zero1_tpmodeALL_vocab131k.sh b/scripts/run_1.34G_dp2_tp32_pp1_acc64_mbs16_seq2048_zero1_tpmodeALL_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..83b378e4c9f0c9660cce569a0305acc315f50f80 --- /dev/null +++ b/scripts/run_1.34G_dp2_tp32_pp1_acc64_mbs16_seq2048_zero1_tpmodeALL_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp2_tp32_pp1_acc64_mbs16_seq2048_zero1_tpmodeALL_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=8 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp2_tp32_pp1_acc64_mbs16_seq2048_zero1_tpmodeALL_vocab131k.yaml diff --git a/scripts/run_1.34G_dp2_tp4_pp16_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp2_tp4_pp16_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..9acdce02789bfe59cf958461cdfc14b8fc5ca89e --- 
/dev/null +++ b/scripts/run_1.34G_dp2_tp4_pp16_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_1.34G_dp2_tp4_pp16_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=INFO # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # 1 disables the Shared Memory (SHM) transport; 0 leaves it enabled +# export NCCL_IB_DISABLE=0 # 1 disables the InfiniBand (IB) transport; 0 leaves it enabled +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds, 21 = ~8 seconds, 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesn't work multi-node +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_1.34G_dp2_tp4_pp16_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_1.34G_dp2_tp4_pp16_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$!
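+    # Note (added): back-of-the-envelope tokens per optimizer step for this config, assuming the usual global-batch formula dp * mbs * acc * seq: +    #   2 * 1 * 128 * 4096 = 1,048,576 tokens (~1M) per step.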
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_1.34G_dp2_tp4_pp16_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_1.34G_dp2_tp4_pp1_acc2_mbs32_seq32768_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp2_tp4_pp1_acc2_mbs32_seq32768_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..844c17e97f7ccd26eccdb30c242f8c873cf01923 --- /dev/null +++ b/scripts/run_1.34G_dp2_tp4_pp1_acc2_mbs32_seq32768_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp2_tp4_pp1_acc2_mbs32_seq32768_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=1 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp2_tp4_pp1_acc2_mbs32_seq32768_zero1_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_1.34G_dp2_tp64_pp1_acc32_mbs32_seq2048_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp2_tp64_pp1_acc32_mbs32_seq2048_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..4c77140d0c96664922ba82dece0dd9bb83df7ff6 --- /dev/null 
+++ b/scripts/run_1.34G_dp2_tp64_pp1_acc32_mbs32_seq2048_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp2_tp64_pp1_acc32_mbs32_seq2048_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp2_tp64_pp1_acc32_mbs32_seq2048_zero1_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_1.34G_dp2_tp8_pp1_acc4_mbs64_seq8192_zero1_tpmodeALL_vocab131k.sh b/scripts/run_1.34G_dp2_tp8_pp1_acc4_mbs64_seq8192_zero1_tpmodeALL_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..c624ff16a691d463af85040f3787211b90f62f9d --- /dev/null +++ b/scripts/run_1.34G_dp2_tp8_pp1_acc4_mbs64_seq8192_zero1_tpmodeALL_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp2_tp8_pp1_acc4_mbs64_seq8192_zero1_tpmodeALL_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=2 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export 
NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp2_tp8_pp1_acc4_mbs64_seq8192_zero1_tpmodeALL_vocab131k.yaml diff --git a/scripts/run_1.34G_dp2_tp8_pp1_acc8_mbs32_seq2048_zero1_tpmodeALL_vocab131k.sh b/scripts/run_1.34G_dp2_tp8_pp1_acc8_mbs32_seq2048_zero1_tpmodeALL_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..cbe9118cc653e3647779d73121dd919242146722 --- /dev/null +++ b/scripts/run_1.34G_dp2_tp8_pp1_acc8_mbs32_seq2048_zero1_tpmodeALL_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp2_tp8_pp1_acc8_mbs32_seq2048_zero1_tpmodeALL_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=2 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + 
--nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp2_tp8_pp1_acc8_mbs32_seq2048_zero1_tpmodeALL_vocab131k.yaml diff --git a/scripts/run_1.34G_dp32_tp1_pp1_acc16_mbs1_seq2048_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp32_tp1_pp1_acc16_mbs1_seq2048_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..22b4c3c2d9331170630d30e41fd3dc91db8a7956 --- /dev/null +++ b/scripts/run_1.34G_dp32_tp1_pp1_acc16_mbs1_seq2048_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp32_tp1_pp1_acc16_mbs1_seq2048_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=4 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp32_tp1_pp1_acc16_mbs1_seq2048_zero1_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_1.34G_dp32_tp1_pp1_acc1_mbs16_seq2048_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp32_tp1_pp1_acc1_mbs16_seq2048_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..3593814571383966f44bc1af6708a420c9dff03c --- /dev/null +++ b/scripts/run_1.34G_dp32_tp1_pp1_acc1_mbs16_seq2048_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp32_tp1_pp1_acc1_mbs16_seq2048_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=4 # Number of nodes (modify as needed) 
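+# Note (added): assuming one rank per GPU, dp32 * tp1 * pp1 = 32 ranks = 4 nodes * 8 GPUs per node, which is where the --nodes=4 above comes from.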
+#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp32_tp1_pp1_acc1_mbs16_seq2048_zero1_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_1.34G_dp32_tp4_pp1_acc1_mbs16_seq8192_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp32_tp4_pp1_acc1_mbs16_seq8192_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..7019bf3a8d911119b3f802937b99c204bd504cf0 --- /dev/null +++ b/scripts/run_1.34G_dp32_tp4_pp1_acc1_mbs16_seq8192_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp32_tp4_pp1_acc1_mbs16_seq8192_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# 
export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp32_tp4_pp1_acc1_mbs16_seq8192_zero1_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_1.34G_dp32_tp4_pp1_acc2_mbs8_seq2048_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp32_tp4_pp1_acc2_mbs8_seq2048_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..2c28114507c20a3031ea8cd02927877e0c39638c --- /dev/null +++ b/scripts/run_1.34G_dp32_tp4_pp1_acc2_mbs8_seq2048_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp32_tp4_pp1_acc2_mbs8_seq2048_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp32_tp4_pp1_acc2_mbs8_seq2048_zero1_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_1.34G_dp32_tp8_pp1_acc8_mbs2_seq8192_zero1_tpmodeALL_vocab131k.sh 
b/scripts/run_1.34G_dp32_tp8_pp1_acc8_mbs2_seq8192_zero1_tpmodeALL_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..17d3665e43a4c8f8c18068a1d448669ea13f07e8 --- /dev/null +++ b/scripts/run_1.34G_dp32_tp8_pp1_acc8_mbs2_seq8192_zero1_tpmodeALL_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp32_tp8_pp1_acc8_mbs2_seq8192_zero1_tpmodeALL_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=32 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp32_tp8_pp1_acc8_mbs2_seq8192_zero1_tpmodeALL_vocab131k.yaml diff --git a/scripts/run_1.34G_dp32_tp8_pp2_acc8_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp32_tp8_pp2_acc8_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..dad9c3e99226622dc701f64c629341f7abcdab7b --- /dev/null +++ b/scripts/run_1.34G_dp32_tp8_pp2_acc8_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,161 @@ +#!/bin/bash +#SBATCH --job-name=bench_1.34G_dp32_tp8_pp2_acc8_mbs1_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:40:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=normal + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set 
-x -e +echo "Running script: $0" + + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_1.34G_dp32_tp8_pp2_acc8_mbs1_seq4096_zero0_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_1.34G_dp32_tp8_pp2_acc8_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
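+    # Optionally, the launcher can block on the background srun and propagate its
+    # exit code; a sketch, left commented out to keep the original detach behaviour:
+    # wait $SRUN_PID
+    # EXIT_CODE=$?
+    # echo "srun step exited with code $EXIT_CODE" | tee -a $OUTPUT_FILE
+    # exit $EXIT_CODE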
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_1.34G_dp32_tp8_pp2_acc8_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_1.34G_dp4_tp16_pp1_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp4_tp16_pp1_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..89623a8d7f45076a27201942f50d9868457e4b8a --- /dev/null +++ b/scripts/run_1.34G_dp4_tp16_pp1_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,73 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp4_tp16_pp1_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high +#SBATCH --exclude=ip-26-0-160-192,ip-26-0-171-102 + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=8 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +# Disable wandb +export WANDB_MODE=disabled + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + + +# Print GPU topology information +echo "=== GPU Topology ===" +nvidia-smi topo -m +echo "==================" + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_1.34G_dp4_tp16_pp1_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_1.34G_dp4_tp32_pp1_acc4_mbs128_seq2048_zero1_tpmodeRED_vocab131k.sh 
b/scripts/run_1.34G_dp4_tp32_pp1_acc4_mbs128_seq2048_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..49119460e853ae6eb78f43d2d068508834fc4908 --- /dev/null +++ b/scripts/run_1.34G_dp4_tp32_pp1_acc4_mbs128_seq2048_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp4_tp32_pp1_acc4_mbs128_seq2048_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp4_tp32_pp1_acc4_mbs128_seq2048_zero1_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_1.34G_dp4_tp4_pp1_acc32_mbs1_seq8192_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp4_tp4_pp1_acc32_mbs1_seq8192_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..9f1f8dafe8f050656529df05d7446589a32764fe --- /dev/null +++ b/scripts/run_1.34G_dp4_tp4_pp1_acc32_mbs1_seq8192_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp4_tp4_pp1_acc32_mbs1_seq8192_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=2 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your 
conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp4_tp4_pp1_acc32_mbs1_seq8192_zero1_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_1.34G_dp4_tp4_pp1_acc4_mbs2_seq32768_zero1_tpmodeALL_vocab131k.sh b/scripts/run_1.34G_dp4_tp4_pp1_acc4_mbs2_seq32768_zero1_tpmodeALL_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..ee7341f4dab54ece4553cc4be60ba4957b41ce34 --- /dev/null +++ b/scripts/run_1.34G_dp4_tp4_pp1_acc4_mbs2_seq32768_zero1_tpmodeALL_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp4_tp4_pp1_acc4_mbs2_seq32768_zero1_tpmodeALL_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=2 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 
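+
+# Optionally, sanity-check that the allocation matches the parallelism layout in the
+# config name before launching (a sketch; dp4 x tp4 x pp1 = 16 GPUs for this config):
+# EXPECTED_WORLD_SIZE=$((4 * 4 * 1))  # dp * tp * pp
+# if [ "$WORLD_SIZE" -ne "$EXPECTED_WORLD_SIZE" ]; then
+#     echo "WORLD_SIZE=$WORLD_SIZE does not match dp*tp*pp=$EXPECTED_WORLD_SIZE" >&2
+#     exit 1
+# fi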
+ +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp4_tp4_pp1_acc4_mbs2_seq32768_zero1_tpmodeALL_vocab131k.yaml diff --git a/scripts/run_1.34G_dp8_tp16_pp1_acc32_mbs8_seq2048_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp8_tp16_pp1_acc32_mbs8_seq2048_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..f3fac1a9d7364fd563d8f107c127bf79d2623df2 --- /dev/null +++ b/scripts/run_1.34G_dp8_tp16_pp1_acc32_mbs8_seq2048_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp8_tp16_pp1_acc32_mbs8_seq2048_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp8_tp16_pp1_acc32_mbs8_seq2048_zero1_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_1.34G_dp8_tp8_pp2_acc2_mbs16_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp8_tp8_pp2_acc2_mbs16_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..27a2b92b1d91a57701ea976b3eb78ea8f75c7b84 --- /dev/null +++ b/scripts/run_1.34G_dp8_tp8_pp2_acc2_mbs16_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_1.34G_dp8_tp8_pp2_acc2_mbs16_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH 
--time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=INFO # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_1.34G_dp8_tp8_pp2_acc2_mbs16_seq4096_zero1_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_1.34G_dp8_tp8_pp2_acc2_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
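+    # Optionally, the run can be followed from the salloc shell while it sits in the
+    # background, e.g. by tailing the redirected log and checking the SLURM queue:
+    # tail -f $OUTPUT_FILE
+    # squeue -j $SLURM_JOB_ID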
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_1.34G_dp8_tp8_pp2_acc2_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_2.28G_dp1_tp8_pp1_acc1_mbs1_seq2048_zero0_tpmodeRED_l26_h2304_heads16.sh b/scripts/run_2.28G_dp1_tp8_pp1_acc1_mbs1_seq2048_zero0_tpmodeRED_l26_h2304_heads16.sh new file mode 100644 index 0000000000000000000000000000000000000000..386f23a42601006ac6a00278d8ecf3d6f512d619 --- /dev/null +++ b/scripts/run_2.28G_dp1_tp8_pp1_acc1_mbs1_seq2048_zero0_tpmodeRED_l26_h2304_heads16.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_2.28G_dp1_tp8_pp1_acc1_mbs1_seq2048_zero0_tpmodeRED_l26_h2304_heads16 # Job name +#SBATCH --time=00:15:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=1 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specificsbat +# export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_2.28G_dp1_tp8_pp1_acc1_mbs1_seq2048_zero0_tpmodeRED_l26_h2304_heads16.yaml diff --git a/scripts/run_2.28G_dp32_tp1_pp1_acc8_mbs1_seq4096_zero1_tpmodeRED_l26_h2304_heads16.sh b/scripts/run_2.28G_dp32_tp1_pp1_acc8_mbs1_seq4096_zero1_tpmodeRED_l26_h2304_heads16.sh new file mode 100644 index 
0000000000000000000000000000000000000000..a24366c82a58060e7d54de0838371ca0efe7a0c6 --- /dev/null +++ b/scripts/run_2.28G_dp32_tp1_pp1_acc8_mbs1_seq4096_zero1_tpmodeRED_l26_h2304_heads16.sh @@ -0,0 +1,57 @@ +#!/bin/bash + +#SBATCH --job-name=bench_2.28G_dp32_tp1_pp1_acc8_mbs1_seq4096_zero1_tpmodeRED_l26_h2304_heads16 # Job name +#SBATCH --time=00:15:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=4 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_2.28G_dp32_tp1_pp1_acc8_mbs1_seq4096_zero1_tpmodeRED_l26_h2304_heads16.yaml diff --git a/scripts/run_3.56G_dp8_tp8_pp1_acc1_mbs16_seq2048_zero0_l28_h3072_heads24.sh b/scripts/run_3.56G_dp8_tp8_pp1_acc1_mbs16_seq2048_zero0_l28_h3072_heads24.sh new file mode 100644 index 0000000000000000000000000000000000000000..806c67e94b8949d7e1ca553144912de90a834293 --- /dev/null +++ b/scripts/run_3.56G_dp8_tp8_pp1_acc1_mbs16_seq2048_zero0_l28_h3072_heads24.sh @@ -0,0 +1,57 @@ +#!/bin/bash + +#SBATCH --job-name=bench_3.56G_dp8_tp8_pp1_acc1_mbs16_seq2048_zero0_l28_h3072_heads24 # Job name +#SBATCH --time=00:15:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=8 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes 
+export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_3.56G_dp8_tp8_pp1_acc1_mbs16_seq2048_zero0_l28_h3072_heads24.yaml diff --git a/scripts/run_3.57G_dp16_tp8_pp4_acc4_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_3.57G_dp16_tp8_pp4_acc4_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..66ba00f2f2bc26d4117630ff4a49c4bb80abeeaa --- /dev/null +++ b/scripts/run_3.57G_dp16_tp8_pp4_acc4_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_3.57G_dp16_tp8_pp4_acc4_mbs4_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:40:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_3.57G_dp16_tp8_pp4_acc4_mbs4_seq4096_zero1_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp16_tp8_pp4_acc4_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
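+    # Optionally, per-rank logs could be captured by extending the torchrun command
+    # above, assuming the installed torchrun exposes these options (the directory
+    # name is only an example):
+    #   --log_dir /fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-ranks --tee 3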
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp16_tp8_pp4_acc4_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_3.57G_dp2_tp1_pp8_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_3.57G_dp2_tp1_pp8_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..6934535617b46f6464a51d96d360213873980a14 --- /dev/null +++ b/scripts/run_3.57G_dp2_tp1_pp8_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_3.57G_dp2_tp1_pp8_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=2 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=INFO # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_3.57G_dp2_tp1_pp8_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp2_tp1_pp8_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
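+    # Optionally, register a cleanup handler so an early exit of this shell does not
+    # leave the background srun running (sketch, commented out by default):
+    # trap 'kill $SRUN_PID 2>/dev/null || true' EXIT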
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp2_tp1_pp8_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_3.57G_dp2_tp2_pp2_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh b/scripts/run_3.57G_dp2_tp2_pp2_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..3f47eebab37edc2724ebec0bb2a21ebdf2822c93 --- /dev/null +++ b/scripts/run_3.57G_dp2_tp2_pp2_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,73 @@ +#!/bin/bash + +#SBATCH --job-name=bench_3.57G_dp2_tp2_pp2_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high +#SBATCH --exclude=ip-26-0-160-192,ip-26-0-171-102 + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=1 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +# Disable wandb +export WANDB_MODE=disabled + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + + +# Print GPU topology information +echo "=== GPU Topology ===" +nvidia-smi topo -m +echo "==================" + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp2_tp2_pp2_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_3.57G_dp2_tp32_pp4_acc8_mbs16_seq4096_zero1_tpmodeRED_vocab131k.sh 
b/scripts/run_3.57G_dp2_tp32_pp4_acc8_mbs16_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..72a85a8f6530034cdc8a9bdc1ce06bd059db0b43 --- /dev/null +++ b/scripts/run_3.57G_dp2_tp32_pp4_acc8_mbs16_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_3.57G_dp2_tp32_pp4_acc8_mbs16_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=32 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=INFO # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_3.57G_dp2_tp32_pp4_acc8_mbs16_seq4096_zero1_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp2_tp32_pp4_acc8_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
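+    # Optionally, the step can also be inspected or cancelled through SLURM rather
+    # than via the local PID (step id 0 is only assumed here):
+    # squeue -s -j $SLURM_JOB_ID      # list the steps of this allocation
+    # scancel $SLURM_JOB_ID.0         # cancel the step without releasing the allocation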
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp2_tp32_pp4_acc8_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_3.57G_dp32_tp1_pp1_acc8_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh b/scripts/run_3.57G_dp32_tp1_pp1_acc8_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..bdb9f64ba9121b7fdf9e0a94e0375c53f8bd5c9e --- /dev/null +++ b/scripts/run_3.57G_dp32_tp1_pp1_acc8_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,73 @@ +#!/bin/bash + +#SBATCH --job-name=bench_3.57G_dp32_tp1_pp1_acc8_mbs1_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high +#SBATCH --exclude=ip-26-0-160-192,ip-26-0-171-102 + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=4 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +# Disable wandb +export WANDB_MODE=disabled + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + + +# Print GPU topology information +echo "=== GPU Topology ===" +nvidia-smi topo -m +echo "==================" + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp32_tp1_pp1_acc8_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_3.57G_dp4_tp1_pp32_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh 
b/scripts/run_3.57G_dp4_tp1_pp32_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..8cbb8acbdaee3aa2db14aaa9d493ddbad7972bec --- /dev/null +++ b/scripts/run_3.57G_dp4_tp1_pp32_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_3.57G_dp4_tp1_pp32_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=INFO # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_3.57G_dp4_tp1_pp32_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp4_tp1_pp32_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp4_tp1_pp32_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_3.57G_dp64_tp4_pp2_acc4_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_3.57G_dp64_tp4_pp2_acc4_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..f946334c427980f17905fcfeab9ae667d1dd9ea6 --- /dev/null +++ b/scripts/run_3.57G_dp64_tp4_pp2_acc4_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_3.57G_dp64_tp4_pp2_acc4_mbs1_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=INFO # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_3.57G_dp64_tp4_pp2_acc4_mbs1_seq4096_zero1_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp64_tp4_pp2_acc4_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp64_tp4_pp2_acc4_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_3.8G_dp1_tp1_pp8_acc7_mbs1_seq4096_zero0_tpmodeRED_vocab131k_cache.sh b/scripts/run_3.8G_dp1_tp1_pp8_acc7_mbs1_seq4096_zero0_tpmodeRED_vocab131k_cache.sh new file mode 100644 index 0000000000000000000000000000000000000000..44e632a1b90db4286c2788f15be3a70cc5a078c0 --- /dev/null +++ b/scripts/run_3.8G_dp1_tp1_pp8_acc7_mbs1_seq4096_zero0_tpmodeRED_vocab131k_cache.sh @@ -0,0 +1,161 @@ +#!/bin/bash +#SBATCH --job-name=bench_3.8G_dp1_tp1_pp8_acc7_mbs1_seq4096_zero0_tpmodeRED_vocab131k_cache # Job name +#SBATCH --time=00:40:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=1 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e +echo "Running script: $0" + + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_3.8G_dp1_tp1_pp8_acc7_mbs1_seq4096_zero0_tpmodeRED_vocab131k_cache" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.8G_dp1_tp1_pp8_acc7_mbs1_seq4096_zero0_tpmodeRED_vocab131k_cache.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.8G_dp1_tp1_pp8_acc7_mbs1_seq4096_zero0_tpmodeRED_vocab131k_cache.yaml +fi diff --git a/scripts/run_37.8G_dp1_tp16_pp32_acc1_mbs1_seq2048_zero0_tpmodeRED_l80_h8192_heads64.sh b/scripts/run_37.8G_dp1_tp16_pp32_acc1_mbs1_seq2048_zero0_tpmodeRED_l80_h8192_heads64.sh new file mode 100644 index 0000000000000000000000000000000000000000..f826a95da527a10ea83c93f82090ca3642e725e1 --- /dev/null +++ b/scripts/run_37.8G_dp1_tp16_pp32_acc1_mbs1_seq2048_zero0_tpmodeRED_l80_h8192_heads64.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_37.8G_dp1_tp16_pp32_acc1_mbs1_seq2048_zero0_tpmodeRED_l80_h8192_heads64 # Job name +#SBATCH --time=00:15:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_37.8G_dp1_tp16_pp32_acc1_mbs1_seq2048_zero0_tpmodeRED_l80_h8192_heads64.yaml diff --git a/scripts/run_467G_dp1_tp4_pp8_acc256_mbs1_seq4096_zero0_tpmodeRED_vocab49k_gqa8.sh b/scripts/run_467G_dp1_tp4_pp8_acc256_mbs1_seq4096_zero0_tpmodeRED_vocab49k_gqa8.sh new file mode 100644 index 
0000000000000000000000000000000000000000..ab4048461701c9ccf016ffdcae795c77d0550262 --- /dev/null +++ b/scripts/run_467G_dp1_tp4_pp8_acc256_mbs1_seq4096_zero0_tpmodeRED_vocab49k_gqa8.sh @@ -0,0 +1,161 @@ +#!/bin/bash +#SBATCH --job-name=bench_467G_dp1_tp4_pp8_acc256_mbs1_seq4096_zero0_tpmodeRED_vocab49k_gqa8 # Job name +#SBATCH --time=00:40:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=4 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e +echo "Running script: $0" + + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_467G_dp1_tp4_pp8_acc256_mbs1_seq4096_zero0_tpmodeRED_vocab49k_gqa8" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_467G_dp1_tp4_pp8_acc256_mbs1_seq4096_zero0_tpmodeRED_vocab49k_gqa8.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_467G_dp1_tp4_pp8_acc256_mbs1_seq4096_zero0_tpmodeRED_vocab49k_gqa8.yaml +fi diff --git a/scripts/run_469G_dp128_tp4_pp1_acc2_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh b/scripts/run_469G_dp128_tp4_pp1_acc2_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..3abeb24d087f2f6ceaa2927c927420aa13697fbe --- /dev/null +++ b/scripts/run_469G_dp128_tp4_pp1_acc2_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_469G_dp128_tp4_pp1_acc2_mbs1_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:40:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_469G_dp128_tp4_pp1_acc2_mbs1_seq4096_zero0_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_469G_dp128_tp4_pp1_acc2_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_469G_dp128_tp4_pp1_acc2_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_469G_dp8_tp32_pp2_acc8_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_469G_dp8_tp32_pp2_acc8_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..8ac06f8bf5966beed0fe7bc2818e7f202435e867 --- /dev/null +++ b/scripts/run_469G_dp8_tp32_pp2_acc8_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_469G_dp8_tp32_pp2_acc8_mbs4_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=INFO # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_469G_dp8_tp32_pp2_acc8_mbs4_seq4096_zero1_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_469G_dp8_tp32_pp2_acc8_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_469G_dp8_tp32_pp2_acc8_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_469G_dp8_tp4_pp1_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh b/scripts/run_469G_dp8_tp4_pp1_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..efbf65671605518744f06820affef06d450f932a --- /dev/null +++ b/scripts/run_469G_dp8_tp4_pp1_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,73 @@ +#!/bin/bash + +#SBATCH --job-name=bench_469G_dp8_tp4_pp1_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high +#SBATCH --exclude=ip-26-0-160-192,ip-26-0-171-102 + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=4 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +# Disable wandb +export WANDB_MODE=disabled + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + + +# Print GPU topology information +echo "=== GPU Topology ===" +nvidia-smi topo -m +echo "==================" + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_469G_dp8_tp4_pp1_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_8.86G_dp32_tp1_pp2_acc8_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh 
b/scripts/run_8.86G_dp32_tp1_pp2_acc8_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..6360bb5f341cc4a44838d723d8d16cb3b4392b87 --- /dev/null +++ b/scripts/run_8.86G_dp32_tp1_pp2_acc8_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,161 @@ +#!/bin/bash +#SBATCH --job-name=bench_8.86G_dp32_tp1_pp2_acc8_mbs1_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:40:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=normal + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=8 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e +echo "Running script: $0" + + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_8.86G_dp32_tp1_pp2_acc8_mbs1_seq4096_zero0_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_8.86G_dp32_tp1_pp2_acc8_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_8.86G_dp32_tp1_pp2_acc8_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_8.86G_dp32_tp1_pp8_acc8_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_8.86G_dp32_tp1_pp8_acc8_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..5176f2d6fa2dc3d970a8edcccc9935feb3fd2afa --- /dev/null +++ b/scripts/run_8.86G_dp32_tp1_pp8_acc8_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_8.86G_dp32_tp1_pp8_acc8_mbs1_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=32 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=INFO # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_8.86G_dp32_tp1_pp8_acc8_mbs1_seq4096_zero1_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_8.86G_dp32_tp1_pp8_acc8_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_8.86G_dp32_tp1_pp8_acc8_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_8.86G_dp4_tp8_pp8_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_8.86G_dp4_tp8_pp8_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..512cb93833e896093a1b226b5615b601513e09d4 --- /dev/null +++ b/scripts/run_8.86G_dp4_tp8_pp8_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_8.86G_dp4_tp8_pp8_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=32 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=INFO # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_8.86G_dp4_tp8_pp8_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_8.86G_dp4_tp8_pp8_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_8.86G_dp4_tp8_pp8_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_8.86G_dp8_tp1_pp2_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh b/scripts/run_8.86G_dp8_tp1_pp2_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..b22f05f515a22ac1ad7ff539232de2dd84877ae6 --- /dev/null +++ b/scripts/run_8.86G_dp8_tp1_pp2_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,73 @@ +#!/bin/bash + +#SBATCH --job-name=bench_8.86G_dp8_tp1_pp2_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high +#SBATCH --exclude=ip-26-0-160-192,ip-26-0-171-102 + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=2 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +# Disable wandb +export WANDB_MODE=disabled + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + + +# Print GPU topology information +echo "=== GPU Topology ===" +nvidia-smi topo -m +echo "==================" + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_8.86G_dp8_tp1_pp2_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_8.86G_dp8_tp4_pp2_acc1_mbs32_seq4096_zero0_tpmodeRED_vocab131k.sh 
b/scripts/run_8.86G_dp8_tp4_pp2_acc1_mbs32_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..b6fd94b69001f96abace53bf16df768e04fa433f --- /dev/null +++ b/scripts/run_8.86G_dp8_tp4_pp2_acc1_mbs32_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,73 @@ +#!/bin/bash + +#SBATCH --job-name=bench_8.86G_dp8_tp4_pp2_acc1_mbs32_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high +#SBATCH --exclude=ip-26-0-160-192,ip-26-0-171-102 + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=8 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +# Disable wandb +export WANDB_MODE=disabled + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + + +# Print GPU topology information +echo "=== GPU Topology ===" +nvidia-smi topo -m +echo "==================" + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_8.86G_dp8_tp4_pp2_acc1_mbs32_seq4096_zero0_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_8.86G_dp8_tp8_pp1_acc4_mbs8_seq4096_zero0_tpmodeRED_vocab131k.sh b/scripts/run_8.86G_dp8_tp8_pp1_acc4_mbs8_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..4fedb673cb80a9cfb3832ad4abea520b836deb68 --- /dev/null +++ b/scripts/run_8.86G_dp8_tp8_pp1_acc4_mbs8_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,161 @@ +#!/bin/bash +#SBATCH --job-name=bench_8.86G_dp8_tp8_pp1_acc4_mbs8_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:40:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=normal + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=8 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if 
any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e +echo "Running script: $0" + + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC
+
+# # Match bandwidth patterns
+# export NCCL_MAX_NCHANNELS=2
+# export NCCL_MIN_NCHANNELS=2
+
+
+# export NCCL_NET_GDR_LEVEL=LOC # Disable GPUDirect RDMA
+# export NCCL_SHM_DISABLE=0 # 0 keeps the Shared Memory (SHM) transport enabled (1 disables it)
+# export NCCL_IB_DISABLE=0 # 0 keeps the InfiniBand (IB) transport enabled (1 disables it)
+# export NCCL_IB_TIMEOUT=60 # timeout = 4.096us * 2^value: 20 = ~4 seconds, 21 = ~8 seconds, 22 = ~16 seconds
+# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well
+
+# Force SHM
+# export NCCL_NET_PLUGIN=none # fixes hang but doesn't work multi-node
+# export NCCL_SOCKET_NTHREADS=1
+# export FI_PROVIDER="tcp"
+
+# Print GPU topology information
+if [ -z "${SALLOC_MODE}" ]; then
+    echo "=== GPU Topology ==="
+    nvidia-smi topo -m
+    echo "=================="
+    export SRUN_ALLOC_ARGS=""
+else
+    export JOBNAME="bench_8.86G_dp8_tp8_pp1_acc4_mbs8_seq4096_zero0_tpmodeRED_vocab131k"
+    export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out"
+    export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME"
+fi
+
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun in background
+if [ -n "${SALLOC_MODE}" ]; then # srun mode
+    srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    --max_restarts 0 \
+    --rdzv_conf timeout=60 \
+    /fsx/nouamane/projects/nanotron/run_train.py \
+    --config-file benchmark/configs/config_8.86G_dp8_tp8_pp1_acc4_mbs8_seq4096_zero0_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 &
+    # Store the process ID
+    SRUN_PID=$!
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_8.86G_dp8_tp8_pp1_acc4_mbs8_seq4096_zero0_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_80G_dp1_tp2_pp8_acc1_mbs256_seq4096_zero0_tpmodeRED_vocab131k.sh b/scripts/run_80G_dp1_tp2_pp8_acc1_mbs256_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..3d8fed6ac38e2add2cbdd00987c859f9fee1bf20 --- /dev/null +++ b/scripts/run_80G_dp1_tp2_pp8_acc1_mbs256_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,73 @@ +#!/bin/bash + +#SBATCH --job-name=bench_80G_dp1_tp2_pp8_acc1_mbs256_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high +#SBATCH --exclude=ip-26-0-160-192,ip-26-0-171-102 + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=2 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +# Disable wandb +export WANDB_MODE=disabled + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + + +# Print GPU topology information +echo "=== GPU Topology ===" +nvidia-smi topo -m +echo "==================" + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_80G_dp1_tp2_pp8_acc1_mbs256_seq4096_zero0_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_80G_dp2_tp1_pp32_acc64_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_80G_dp2_tp1_pp32_acc64_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh 
new file mode 100644 index 0000000000000000000000000000000000000000..f7915ec9af586365f62889f4de6a515cb230f4ce --- /dev/null +++ b/scripts/run_80G_dp2_tp1_pp32_acc64_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_80G_dp2_tp1_pp32_acc64_mbs2_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=8 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=INFO # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC
+
+# # Match bandwidth patterns
+# export NCCL_MAX_NCHANNELS=2
+# export NCCL_MIN_NCHANNELS=2
+
+
+# export NCCL_NET_GDR_LEVEL=LOC # Disable GPUDirect RDMA
+# export NCCL_SHM_DISABLE=0 # 0 keeps the Shared Memory (SHM) transport enabled (1 disables it)
+# export NCCL_IB_DISABLE=0 # 0 keeps the InfiniBand (IB) transport enabled (1 disables it)
+# export NCCL_IB_TIMEOUT=60 # timeout = 4.096us * 2^value: 20 = ~4 seconds, 21 = ~8 seconds, 22 = ~16 seconds
+# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well
+
+# Force SHM
+# export NCCL_NET_PLUGIN=none # fixes hang but doesn't work multi-node
+# export NCCL_SOCKET_NTHREADS=1
+# export FI_PROVIDER="tcp"
+
+# Print GPU topology information
+if [ -z "${SALLOC_MODE}" ]; then
+    echo "=== GPU Topology ==="
+    nvidia-smi topo -m
+    echo "=================="
+    export SRUN_ALLOC_ARGS=""
+else
+    export JOBNAME="bench_80G_dp2_tp1_pp32_acc64_mbs2_seq4096_zero1_tpmodeRED_vocab131k"
+    export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out"
+    export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME"
+fi
+
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun in background
+if [ -n "${SALLOC_MODE}" ]; then # srun mode
+    srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    --max_restarts 0 \
+    --rdzv_conf timeout=60 \
+    /fsx/nouamane/projects/nanotron/run_train.py \
+    --config-file benchmark/configs/config_80G_dp2_tp1_pp32_acc64_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 &
+    # Store the process ID
+    SRUN_PID=$!
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_80G_dp2_tp1_pp32_acc64_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_80G_dp2_tp2_pp8_acc8_mbs16_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_80G_dp2_tp2_pp8_acc8_mbs16_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..f867bfbe755e8e177212c58c9742f70e8002124f --- /dev/null +++ b/scripts/run_80G_dp2_tp2_pp8_acc8_mbs16_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_80G_dp2_tp2_pp8_acc8_mbs16_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=4 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=INFO # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC
+
+# # Match bandwidth patterns
+# export NCCL_MAX_NCHANNELS=2
+# export NCCL_MIN_NCHANNELS=2
+
+
+# export NCCL_NET_GDR_LEVEL=LOC # Disable GPUDirect RDMA
+# export NCCL_SHM_DISABLE=0 # 0 keeps the Shared Memory (SHM) transport enabled (1 disables it)
+# export NCCL_IB_DISABLE=0 # 0 keeps the InfiniBand (IB) transport enabled (1 disables it)
+# export NCCL_IB_TIMEOUT=60 # timeout = 4.096us * 2^value: 20 = ~4 seconds, 21 = ~8 seconds, 22 = ~16 seconds
+# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well
+
+# Force SHM
+# export NCCL_NET_PLUGIN=none # fixes hang but doesn't work multi-node
+# export NCCL_SOCKET_NTHREADS=1
+# export FI_PROVIDER="tcp"
+
+# Print GPU topology information
+if [ -z "${SALLOC_MODE}" ]; then
+    echo "=== GPU Topology ==="
+    nvidia-smi topo -m
+    echo "=================="
+    export SRUN_ALLOC_ARGS=""
+else
+    export JOBNAME="bench_80G_dp2_tp2_pp8_acc8_mbs16_seq4096_zero1_tpmodeRED_vocab131k"
+    export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out"
+    export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME"
+fi
+
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun in background
+if [ -n "${SALLOC_MODE}" ]; then # srun mode
+    srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    --max_restarts 0 \
+    --rdzv_conf timeout=60 \
+    /fsx/nouamane/projects/nanotron/run_train.py \
+    --config-file benchmark/configs/config_80G_dp2_tp2_pp8_acc8_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 &
+    # Store the process ID
+    SRUN_PID=$!
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_80G_dp2_tp2_pp8_acc8_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_80G_dp2_tp4_pp2_acc64_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_80G_dp2_tp4_pp2_acc64_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..0f709f4d1a843878105faab601f413d61191149e --- /dev/null +++ b/scripts/run_80G_dp2_tp4_pp2_acc64_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_80G_dp2_tp4_pp2_acc64_mbs2_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=2 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=INFO # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC
+
+# # Match bandwidth patterns
+# export NCCL_MAX_NCHANNELS=2
+# export NCCL_MIN_NCHANNELS=2
+
+
+# export NCCL_NET_GDR_LEVEL=LOC # Disable GPUDirect RDMA
+# export NCCL_SHM_DISABLE=0 # 0 keeps the Shared Memory (SHM) transport enabled (1 disables it)
+# export NCCL_IB_DISABLE=0 # 0 keeps the InfiniBand (IB) transport enabled (1 disables it)
+# export NCCL_IB_TIMEOUT=60 # timeout = 4.096us * 2^value: 20 = ~4 seconds, 21 = ~8 seconds, 22 = ~16 seconds
+# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well
+
+# Force SHM
+# export NCCL_NET_PLUGIN=none # fixes hang but doesn't work multi-node
+# export NCCL_SOCKET_NTHREADS=1
+# export FI_PROVIDER="tcp"
+
+# Print GPU topology information
+if [ -z "${SALLOC_MODE}" ]; then
+    echo "=== GPU Topology ==="
+    nvidia-smi topo -m
+    echo "=================="
+    export SRUN_ALLOC_ARGS=""
+else
+    export JOBNAME="bench_80G_dp2_tp4_pp2_acc64_mbs2_seq4096_zero1_tpmodeRED_vocab131k"
+    export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out"
+    export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME"
+fi
+
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun in background
+if [ -n "${SALLOC_MODE}" ]; then # srun mode
+    srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    --max_restarts 0 \
+    --rdzv_conf timeout=60 \
+    /fsx/nouamane/projects/nanotron/run_train.py \
+    --config-file benchmark/configs/config_80G_dp2_tp4_pp2_acc64_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 &
+    # Store the process ID
+    SRUN_PID=$!
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_80G_dp2_tp4_pp2_acc64_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_80G_dp2_tp8_pp8_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_80G_dp2_tp8_pp8_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..5c4a9aa039e849b929681d90c2818e65f1f5afc5 --- /dev/null +++ b/scripts/run_80G_dp2_tp8_pp8_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_80G_dp2_tp8_pp8_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=INFO # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC
+
+# # Match bandwidth patterns
+# export NCCL_MAX_NCHANNELS=2
+# export NCCL_MIN_NCHANNELS=2
+
+
+# export NCCL_NET_GDR_LEVEL=LOC # Disable GPUDirect RDMA
+# export NCCL_SHM_DISABLE=0 # 0 keeps the Shared Memory (SHM) transport enabled (1 disables it)
+# export NCCL_IB_DISABLE=0 # 0 keeps the InfiniBand (IB) transport enabled (1 disables it)
+# export NCCL_IB_TIMEOUT=60 # timeout = 4.096us * 2^value: 20 = ~4 seconds, 21 = ~8 seconds, 22 = ~16 seconds
+# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well
+
+# Force SHM
+# export NCCL_NET_PLUGIN=none # fixes hang but doesn't work multi-node
+# export NCCL_SOCKET_NTHREADS=1
+# export FI_PROVIDER="tcp"
+
+# Print GPU topology information
+if [ -z "${SALLOC_MODE}" ]; then
+    echo "=== GPU Topology ==="
+    nvidia-smi topo -m
+    echo "=================="
+    export SRUN_ALLOC_ARGS=""
+else
+    export JOBNAME="bench_80G_dp2_tp8_pp8_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k"
+    export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out"
+    export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME"
+fi
+
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun in background
+if [ -n "${SALLOC_MODE}" ]; then # srun mode
+    srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    --max_restarts 0 \
+    --rdzv_conf timeout=60 \
+    /fsx/nouamane/projects/nanotron/run_train.py \
+    --config-file benchmark/configs/config_80G_dp2_tp8_pp8_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 &
+    # Store the process ID
+    SRUN_PID=$!
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_80G_dp2_tp8_pp8_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_80G_dp8_tp4_pp1_acc32_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_80G_dp8_tp4_pp1_acc32_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..043d7ba2a97d6ee7b7ce6866154e3fdceb8626dd --- /dev/null +++ b/scripts/run_80G_dp8_tp4_pp1_acc32_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,73 @@ +#!/bin/bash + +#SBATCH --job-name=bench_80G_dp8_tp4_pp1_acc32_mbs1_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high +#SBATCH --exclude=ip-26-0-160-192,ip-26-0-171-102 + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=4 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +# Disable wandb +export WANDB_MODE=disabled + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + + +# Print GPU topology information +echo "=== GPU Topology ===" +nvidia-smi topo -m +echo "==================" + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_80G_dp8_tp4_pp1_acc32_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml
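All of these launchers are submitted with sbatch, and the longer variants additionally detect an existing salloc allocation when SALLOC_JOBID and NNODES are exported. A minimal usage sketch, using the dp2_tp8_pp8 script as an example and the illustrative job ID quoted in the scripts' own comments:

# Option 1: sbatch mode - the #SBATCH header allocates the nodes and srun launches torchrun directly
sbatch scripts/run_80G_dp2_tp8_pp8_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh

# Option 2: salloc mode (long variants only) - reuse an existing allocation;
# SALLOC_JOBID and NNODES are required, torchrun output is redirected to the
# timestamped $OUTPUT_FILE and srun is left running in the background
SALLOC_JOBID=13482276 NNODES=16 \
    bash scripts/run_80G_dp2_tp8_pp8_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh

In salloc mode the script echoes the background srun PID; that PID can be monitored with ps -p <pid> or stopped with kill <pid>, as the script itself prints.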