diff --git a/scripts/run_1.07G_dp8_tp1_pp1_acc1_mbs1_seq4096_zero0_l15_h2048_heads16.sh b/scripts/run_1.07G_dp8_tp1_pp1_acc1_mbs1_seq4096_zero0_l15_h2048_heads16.sh new file mode 100644 index 0000000000000000000000000000000000000000..da3e6d7ecb0c60a96dc04854263efe2b3ee971e6 --- /dev/null +++ b/scripts/run_1.07G_dp8_tp1_pp1_acc1_mbs1_seq4096_zero0_l15_h2048_heads16.sh @@ -0,0 +1,57 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.07G_dp8_tp1_pp1_acc1_mbs1_seq4096_zero0_l15_h2048_heads16 # Job name +#SBATCH --time=00:15:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=1 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.07G_dp8_tp1_pp1_acc1_mbs1_seq4096_zero0_l15_h2048_heads16.yaml diff --git a/scripts/run_1.14G_dp16_tp16_pp1_acc8_mbs4_seq2048_zero1_tpmodeALL_vocab32k.sh b/scripts/run_1.14G_dp16_tp16_pp1_acc8_mbs4_seq2048_zero1_tpmodeALL_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..5c82dcf6b2e395330a233d01446f4d2386823743 --- /dev/null +++ b/scripts/run_1.14G_dp16_tp16_pp1_acc8_mbs4_seq2048_zero1_tpmodeALL_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp16_tp16_pp1_acc8_mbs4_seq2048_zero1_tpmodeALL_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=32 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames 
$SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp16_tp16_pp1_acc8_mbs4_seq2048_zero1_tpmodeALL_vocab32k.yaml diff --git a/scripts/run_1.14G_dp16_tp1_pp2_acc1_mbs8_seq32768_zero1_tpmodeRED_vocab32k.sh b/scripts/run_1.14G_dp16_tp1_pp2_acc1_mbs8_seq32768_zero1_tpmodeRED_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..8501286b2fd23b048e0e8a66308141bbeddb9b56 --- /dev/null +++ b/scripts/run_1.14G_dp16_tp1_pp2_acc1_mbs8_seq32768_zero1_tpmodeRED_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp16_tp1_pp2_acc1_mbs8_seq32768_zero1_tpmodeRED_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=4 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + 
--nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp16_tp1_pp2_acc1_mbs8_seq32768_zero1_tpmodeRED_vocab32k.yaml diff --git a/scripts/run_1.14G_dp16_tp2_pp1_acc16_mbs2_seq8192_zero1_tpmodeALL_vocab32k.sh b/scripts/run_1.14G_dp16_tp2_pp1_acc16_mbs2_seq8192_zero1_tpmodeALL_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..e477f62816b115325f83287b59f90c1f11c9c5bd --- /dev/null +++ b/scripts/run_1.14G_dp16_tp2_pp1_acc16_mbs2_seq8192_zero1_tpmodeALL_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp16_tp2_pp1_acc16_mbs2_seq8192_zero1_tpmodeALL_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=4 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp16_tp2_pp1_acc16_mbs2_seq8192_zero1_tpmodeALL_vocab32k.yaml diff --git a/scripts/run_1.14G_dp16_tp32_pp1_acc2_mbs16_seq8192_zero1_tpmodeALL_vocab32k.sh b/scripts/run_1.14G_dp16_tp32_pp1_acc2_mbs16_seq8192_zero1_tpmodeALL_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..b1db320b9263edba795d80b5c29ec087d99753a0 --- /dev/null +++ b/scripts/run_1.14G_dp16_tp32_pp1_acc2_mbs16_seq8192_zero1_tpmodeALL_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp16_tp32_pp1_acc2_mbs16_seq8192_zero1_tpmodeALL_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 
# Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp16_tp32_pp1_acc2_mbs16_seq8192_zero1_tpmodeALL_vocab32k.yaml diff --git a/scripts/run_1.14G_dp2_tp16_pp1_acc32_mbs32_seq2048_zero1_tpmodeRED_vocab32k.sh b/scripts/run_1.14G_dp2_tp16_pp1_acc32_mbs32_seq2048_zero1_tpmodeRED_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..76c48d12a2822eb0361bc49664e148a427a7b76c --- /dev/null +++ b/scripts/run_1.14G_dp2_tp16_pp1_acc32_mbs32_seq2048_zero1_tpmodeRED_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp2_tp16_pp1_acc32_mbs32_seq2048_zero1_tpmodeRED_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=4 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# 
Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp2_tp16_pp1_acc32_mbs32_seq2048_zero1_tpmodeRED_vocab32k.yaml diff --git a/scripts/run_1.14G_dp2_tp256_pp1_acc2_mbs128_seq2048_zero1_tpmodeALL_vocab32k.sh b/scripts/run_1.14G_dp2_tp256_pp1_acc2_mbs128_seq2048_zero1_tpmodeALL_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..4d57dfb192e70d1159b58fc4a53bf028cb56e615 --- /dev/null +++ b/scripts/run_1.14G_dp2_tp256_pp1_acc2_mbs128_seq2048_zero1_tpmodeALL_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp2_tp256_pp1_acc2_mbs128_seq2048_zero1_tpmodeALL_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp2_tp256_pp1_acc2_mbs128_seq2048_zero1_tpmodeALL_vocab32k.yaml diff --git a/scripts/run_1.14G_dp2_tp256_pp1_acc32_mbs2_seq32768_zero1_tpmodeALL_vocab32k.sh 
b/scripts/run_1.14G_dp2_tp256_pp1_acc32_mbs2_seq32768_zero1_tpmodeALL_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..ad134d8495e83d4d6cad411180ca0dc7bb7c9cb4 --- /dev/null +++ b/scripts/run_1.14G_dp2_tp256_pp1_acc32_mbs2_seq32768_zero1_tpmodeALL_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp2_tp256_pp1_acc32_mbs2_seq32768_zero1_tpmodeALL_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp2_tp256_pp1_acc32_mbs2_seq32768_zero1_tpmodeALL_vocab32k.yaml diff --git a/scripts/run_1.14G_dp2_tp64_pp1_acc1_mbs256_seq2048_zero1_tpmodeALL_vocab32k.sh b/scripts/run_1.14G_dp2_tp64_pp1_acc1_mbs256_seq2048_zero1_tpmodeALL_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..c3596bd8c620fe09481cb879b62738b7387b9aaa --- /dev/null +++ b/scripts/run_1.14G_dp2_tp64_pp1_acc1_mbs256_seq2048_zero1_tpmodeALL_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp2_tp64_pp1_acc1_mbs256_seq2048_zero1_tpmodeALL_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate 
your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp2_tp64_pp1_acc1_mbs256_seq2048_zero1_tpmodeALL_vocab32k.yaml diff --git a/scripts/run_1.14G_dp32_tp16_pp1_acc1_mbs1_seq32768_zero1_tpmodeRED_vocab32k.sh b/scripts/run_1.14G_dp32_tp16_pp1_acc1_mbs1_seq32768_zero1_tpmodeRED_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..57d7b7e98cc5ed67f5c5f239354e3cf545656066 --- /dev/null +++ b/scripts/run_1.14G_dp32_tp16_pp1_acc1_mbs1_seq32768_zero1_tpmodeRED_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp32_tp16_pp1_acc1_mbs1_seq32768_zero1_tpmodeRED_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export 
NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp32_tp16_pp1_acc1_mbs1_seq32768_zero1_tpmodeRED_vocab32k.yaml diff --git a/scripts/run_1.14G_dp32_tp1_pp2_acc2_mbs2_seq8192_zero1_tpmodeRED_vocab32k.sh b/scripts/run_1.14G_dp32_tp1_pp2_acc2_mbs2_seq8192_zero1_tpmodeRED_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..04694c5f4d550207d9630f81005b90667b74656d --- /dev/null +++ b/scripts/run_1.14G_dp32_tp1_pp2_acc2_mbs2_seq8192_zero1_tpmodeRED_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp32_tp1_pp2_acc2_mbs2_seq8192_zero1_tpmodeRED_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=8 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp32_tp1_pp2_acc2_mbs2_seq8192_zero1_tpmodeRED_vocab32k.yaml diff --git a/scripts/run_1.14G_dp32_tp2_pp1_acc1_mbs4_seq8192_zero1_tpmodeALL_vocab32k.sh b/scripts/run_1.14G_dp32_tp2_pp1_acc1_mbs4_seq8192_zero1_tpmodeALL_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..23fc7f40719f92c3dbda69db7edd5d3ab047d503 --- /dev/null +++ b/scripts/run_1.14G_dp32_tp2_pp1_acc1_mbs4_seq8192_zero1_tpmodeALL_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp32_tp2_pp1_acc1_mbs4_seq8192_zero1_tpmodeALL_vocab32k # Job 
name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=8 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp32_tp2_pp1_acc1_mbs4_seq8192_zero1_tpmodeALL_vocab32k.yaml diff --git a/scripts/run_1.14G_dp4_tp1_pp2_acc4_mbs2_seq32768_zero1_tpmodeRED_vocab32k.sh b/scripts/run_1.14G_dp4_tp1_pp2_acc4_mbs2_seq32768_zero1_tpmodeRED_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..53f0590f11648df65b9f945410c9d5e6031c6d7c --- /dev/null +++ b/scripts/run_1.14G_dp4_tp1_pp2_acc4_mbs2_seq32768_zero1_tpmodeRED_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp4_tp1_pp2_acc4_mbs2_seq32768_zero1_tpmodeRED_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=1 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export 
NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp4_tp1_pp2_acc4_mbs2_seq32768_zero1_tpmodeRED_vocab32k.yaml diff --git a/scripts/run_1.14G_dp4_tp1_pp2_acc64_mbs8_seq2048_zero1_tpmodeRED_vocab32k.sh b/scripts/run_1.14G_dp4_tp1_pp2_acc64_mbs8_seq2048_zero1_tpmodeRED_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..034a7c625bc32bb300811e3935c808abe6960c3c --- /dev/null +++ b/scripts/run_1.14G_dp4_tp1_pp2_acc64_mbs8_seq2048_zero1_tpmodeRED_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp4_tp1_pp2_acc64_mbs8_seq2048_zero1_tpmodeRED_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=1 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file 
benchmark/configs/config_1.14G_dp4_tp1_pp2_acc64_mbs8_seq2048_zero1_tpmodeRED_vocab32k.yaml diff --git a/scripts/run_1.14G_dp4_tp2_pp1_acc128_mbs1_seq2048_zero1_tpmodeRED_vocab32k.sh b/scripts/run_1.14G_dp4_tp2_pp1_acc128_mbs1_seq2048_zero1_tpmodeRED_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..468d67be26965fb178b043fdbf8cb7a65379c59f --- /dev/null +++ b/scripts/run_1.14G_dp4_tp2_pp1_acc128_mbs1_seq2048_zero1_tpmodeRED_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp4_tp2_pp1_acc128_mbs1_seq2048_zero1_tpmodeRED_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=1 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp4_tp2_pp1_acc128_mbs1_seq2048_zero1_tpmodeRED_vocab32k.yaml diff --git a/scripts/run_1.14G_dp4_tp32_pp1_acc2_mbs16_seq32768_zero1_tpmodeRED_vocab32k.sh b/scripts/run_1.14G_dp4_tp32_pp1_acc2_mbs16_seq32768_zero1_tpmodeRED_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..3c80959df08f11271e635f2c989fab07efae0347 --- /dev/null +++ b/scripts/run_1.14G_dp4_tp32_pp1_acc2_mbs16_seq32768_zero1_tpmodeRED_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp4_tp32_pp1_acc2_mbs16_seq32768_zero1_tpmodeRED_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of 
nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp4_tp32_pp1_acc2_mbs16_seq32768_zero1_tpmodeRED_vocab32k.yaml diff --git a/scripts/run_1.14G_dp4_tp32_pp1_acc2_mbs64_seq2048_zero1_tpmodeALL_vocab32k.sh b/scripts/run_1.14G_dp4_tp32_pp1_acc2_mbs64_seq2048_zero1_tpmodeALL_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..94112fffb513ca35a41b3d542930ba28b2c40302 --- /dev/null +++ b/scripts/run_1.14G_dp4_tp32_pp1_acc2_mbs64_seq2048_zero1_tpmodeALL_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp4_tp32_pp1_acc2_mbs64_seq2048_zero1_tpmodeALL_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these 
EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp4_tp32_pp1_acc2_mbs64_seq2048_zero1_tpmodeALL_vocab32k.yaml diff --git a/scripts/run_1.14G_dp4_tp64_pp1_acc32_mbs1_seq32768_zero1_tpmodeRED_vocab32k.sh b/scripts/run_1.14G_dp4_tp64_pp1_acc32_mbs1_seq32768_zero1_tpmodeRED_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..e24d0f7484a9a926620906fd76c0db99f3678d4d --- /dev/null +++ b/scripts/run_1.14G_dp4_tp64_pp1_acc32_mbs1_seq32768_zero1_tpmodeRED_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp4_tp64_pp1_acc32_mbs1_seq32768_zero1_tpmodeRED_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=32 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp4_tp64_pp1_acc32_mbs1_seq32768_zero1_tpmodeRED_vocab32k.yaml diff --git a/scripts/run_1.14G_dp64_tp8_pp1_acc1_mbs2_seq32768_zero1_tpmodeALL_vocab32k.sh b/scripts/run_1.14G_dp64_tp8_pp1_acc1_mbs2_seq32768_zero1_tpmodeALL_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..cc311ff8476a899065b4ae2bfac50edf03229090 --- /dev/null +++ 
b/scripts/run_1.14G_dp64_tp8_pp1_acc1_mbs2_seq32768_zero1_tpmodeALL_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp64_tp8_pp1_acc1_mbs2_seq32768_zero1_tpmodeALL_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp64_tp8_pp1_acc1_mbs2_seq32768_zero1_tpmodeALL_vocab32k.yaml diff --git a/scripts/run_1.14G_dp8_tp1_pp2_acc16_mbs16_seq2048_zero1_tpmodeRED_vocab32k.sh b/scripts/run_1.14G_dp8_tp1_pp2_acc16_mbs16_seq2048_zero1_tpmodeRED_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..cab92b9a7d74857b54ee327e91ce1951f5152487 --- /dev/null +++ b/scripts/run_1.14G_dp8_tp1_pp2_acc16_mbs16_seq2048_zero1_tpmodeRED_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp8_tp1_pp2_acc16_mbs16_seq2048_zero1_tpmodeRED_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=2 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export 
NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp8_tp1_pp2_acc16_mbs16_seq2048_zero1_tpmodeRED_vocab32k.yaml diff --git a/scripts/run_1.14G_dp8_tp2_pp1_acc16_mbs1_seq8192_zero1_tpmodeRED_vocab32k.sh b/scripts/run_1.14G_dp8_tp2_pp1_acc16_mbs1_seq8192_zero1_tpmodeRED_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..859b001c7423b000764a93f97bc83f5c8a7de243 --- /dev/null +++ b/scripts/run_1.14G_dp8_tp2_pp1_acc16_mbs1_seq8192_zero1_tpmodeRED_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp8_tp2_pp1_acc16_mbs1_seq8192_zero1_tpmodeRED_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=2 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + 
--nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp8_tp2_pp1_acc16_mbs1_seq8192_zero1_tpmodeRED_vocab32k.yaml diff --git a/scripts/run_1.14G_dp8_tp32_pp1_acc8_mbs32_seq2048_zero1_tpmodeALL_vocab32k.sh b/scripts/run_1.14G_dp8_tp32_pp1_acc8_mbs32_seq2048_zero1_tpmodeALL_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..c3954d2dafbc8b0130b72e91eda22c835c045554 --- /dev/null +++ b/scripts/run_1.14G_dp8_tp32_pp1_acc8_mbs32_seq2048_zero1_tpmodeALL_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp8_tp32_pp1_acc8_mbs32_seq2048_zero1_tpmodeALL_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=32 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp8_tp32_pp1_acc8_mbs32_seq2048_zero1_tpmodeALL_vocab32k.yaml diff --git a/scripts/run_1.14G_dp8_tp64_pp1_acc2_mbs32_seq8192_zero1_tpmodeALL_vocab32k.sh b/scripts/run_1.14G_dp8_tp64_pp1_acc2_mbs32_seq8192_zero1_tpmodeALL_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..e84ffb561e65bffceae36b0932ebb254ac545e04 --- /dev/null +++ b/scripts/run_1.14G_dp8_tp64_pp1_acc2_mbs32_seq8192_zero1_tpmodeALL_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.14G_dp8_tp64_pp1_acc2_mbs32_seq8192_zero1_tpmodeALL_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as needed) +#SBATCH 
--ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.14G_dp8_tp64_pp1_acc2_mbs32_seq8192_zero1_tpmodeALL_vocab32k.yaml diff --git a/scripts/run_1.34G_dp16_tp16_pp1_acc1_mbs32_seq2048_zero1_tpmodeALL_vocab131k.sh b/scripts/run_1.34G_dp16_tp16_pp1_acc1_mbs32_seq2048_zero1_tpmodeALL_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..5f29e11e504eb16ef8e0330239a2bc68f30cce4c --- /dev/null +++ b/scripts/run_1.34G_dp16_tp16_pp1_acc1_mbs32_seq2048_zero1_tpmodeALL_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp16_tp16_pp1_acc1_mbs32_seq2048_zero1_tpmodeALL_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=32 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export 
NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp16_tp16_pp1_acc1_mbs32_seq2048_zero1_tpmodeALL_vocab131k.yaml diff --git a/scripts/run_1.34G_dp16_tp16_pp1_acc2_mbs4_seq32768_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp16_tp16_pp1_acc2_mbs4_seq32768_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..2502303ff821bbea8054e52571a0997cf4db95be --- /dev/null +++ b/scripts/run_1.34G_dp16_tp16_pp1_acc2_mbs4_seq32768_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp16_tp16_pp1_acc2_mbs4_seq32768_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=32 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp16_tp16_pp1_acc2_mbs4_seq32768_zero1_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_1.34G_dp16_tp16_pp1_acc4_mbs2_seq32768_zero1_tpmodeRED_vocab131k.sh 
b/scripts/run_1.34G_dp16_tp16_pp1_acc4_mbs2_seq32768_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..1bd1a9108b2efe8223677ea52106fc2a4572e440 --- /dev/null +++ b/scripts/run_1.34G_dp16_tp16_pp1_acc4_mbs2_seq32768_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp16_tp16_pp1_acc4_mbs2_seq32768_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=32 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp16_tp16_pp1_acc4_mbs2_seq32768_zero1_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_1.34G_dp16_tp1_pp1_acc4_mbs2_seq8192_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp16_tp1_pp1_acc4_mbs2_seq8192_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..b324ca9df6e5b6964c0441e7ee20398828f941ab --- /dev/null +++ b/scripts/run_1.34G_dp16_tp1_pp1_acc4_mbs2_seq8192_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp16_tp1_pp1_acc4_mbs2_seq8192_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=2 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your 
conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp16_tp1_pp1_acc4_mbs2_seq8192_zero1_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_1.34G_dp16_tp1_pp2_acc4_mbs2_seq32768_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp16_tp1_pp2_acc4_mbs2_seq32768_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..8296bf752e9626b1834a0bb4c41cc9c77eaf95c5 --- /dev/null +++ b/scripts/run_1.34G_dp16_tp1_pp2_acc4_mbs2_seq32768_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp16_tp1_pp2_acc4_mbs2_seq32768_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=4 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export 
NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp16_tp1_pp2_acc4_mbs2_seq32768_zero1_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_1.34G_dp16_tp32_pp1_acc128_mbs1_seq2048_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp16_tp32_pp1_acc128_mbs1_seq2048_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..c02921b1c1cd9c009ea40668bc7bfd7ae2ac4def --- /dev/null +++ b/scripts/run_1.34G_dp16_tp32_pp1_acc128_mbs1_seq2048_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp16_tp32_pp1_acc128_mbs1_seq2048_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp16_tp32_pp1_acc128_mbs1_seq2048_zero1_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_1.34G_dp16_tp4_pp1_acc16_mbs2_seq8192_zero1_tpmodeALL_vocab131k.sh b/scripts/run_1.34G_dp16_tp4_pp1_acc16_mbs2_seq8192_zero1_tpmodeALL_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..8c8b05fff594a7be4c22a54ff3bcb49ffb64772e --- /dev/null +++ b/scripts/run_1.34G_dp16_tp4_pp1_acc16_mbs2_seq8192_zero1_tpmodeALL_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH 
--job-name=bench_1.34G_dp16_tp4_pp1_acc16_mbs2_seq8192_zero1_tpmodeALL_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=8 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp16_tp4_pp1_acc16_mbs2_seq8192_zero1_tpmodeALL_vocab131k.yaml diff --git a/scripts/run_1.34G_dp2_tp128_pp1_acc4_mbs16_seq32768_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp2_tp128_pp1_acc4_mbs16_seq32768_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..7c7535c7abf2354d9398c08da890e6cf57039ea9 --- /dev/null +++ b/scripts/run_1.34G_dp2_tp128_pp1_acc4_mbs16_seq32768_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp2_tp128_pp1_acc4_mbs16_seq32768_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=32 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames 
$SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp2_tp128_pp1_acc4_mbs16_seq32768_zero1_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_1.34G_dp2_tp16_pp1_acc4_mbs16_seq8192_zero1_tpmodeALL_vocab131k.sh b/scripts/run_1.34G_dp2_tp16_pp1_acc4_mbs16_seq8192_zero1_tpmodeALL_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..be652e7583524716af0352f218c41dc97a76a733 --- /dev/null +++ b/scripts/run_1.34G_dp2_tp16_pp1_acc4_mbs16_seq8192_zero1_tpmodeALL_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp2_tp16_pp1_acc4_mbs16_seq8192_zero1_tpmodeALL_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=4 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + 
--rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp2_tp16_pp1_acc4_mbs16_seq8192_zero1_tpmodeALL_vocab131k.yaml diff --git a/scripts/run_1.34G_dp2_tp1_pp8_acc8_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp2_tp1_pp8_acc8_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..b1eb209b5e810d07d21d9c4b58bf79da855dbc68 --- /dev/null +++ b/scripts/run_1.34G_dp2_tp1_pp8_acc8_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,128 @@ +#!/bin/bash +#SBATCH --job-name=bench_1.34G_dp2_tp1_pp8_acc8_mbs2_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=2 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=INFO # INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +export NCCL_NET_GDR_LEVEL=LOC + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_1.34G_dp2_tp1_pp8_acc8_mbs2_seq4096_zero1_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export 
SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_1.34G_dp2_tp1_pp8_acc8_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! + echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_1.34G_dp2_tp1_pp8_acc8_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_1.34G_dp2_tp256_pp1_acc256_mbs1_seq2048_zero1_tpmodeALL_vocab131k.sh b/scripts/run_1.34G_dp2_tp256_pp1_acc256_mbs1_seq2048_zero1_tpmodeALL_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..a7a98017b55062c0272716dda26d5686a4e51244 --- /dev/null +++ b/scripts/run_1.34G_dp2_tp256_pp1_acc256_mbs1_seq2048_zero1_tpmodeALL_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp2_tp256_pp1_acc256_mbs1_seq2048_zero1_tpmodeALL_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure 
NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp2_tp256_pp1_acc256_mbs1_seq2048_zero1_tpmodeALL_vocab131k.yaml diff --git a/scripts/run_1.34G_dp2_tp2_pp8_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp2_tp2_pp8_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..6d9db812c19a1635766cf5bc44c059be3e0f1215 --- /dev/null +++ b/scripts/run_1.34G_dp2_tp2_pp8_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_1.34G_dp2_tp2_pp8_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:40:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=4 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
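+# NOTE: the commented-out settings in this block are optional NCCL/libfabric
+# debugging knobs (TCP-only transport, channel limits, IB timeouts) left over
+# from chasing communication hangs; they are kept for reference and are not
+# required for a normal benchmark run. Uncomment individual lines only when
+# debugging the fabric, e.g. forcing the TCP provider via FI_PROVIDER="tcp".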
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_1.34G_dp2_tp2_pp8_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_1.34G_dp2_tp2_pp8_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
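+    # $! captures the PID of the backgrounded srun on the submit node, not of
+    # the remote training processes; killing it should make srun tear down the
+    # job step. A hypothetical way to follow progress from the salloc shell,
+    # assuming the log path defined in OUTPUT_FILE above:
+    #   tail -f "$OUTPUT_FILE"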
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_1.34G_dp2_tp2_pp8_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_1.34G_dp2_tp8_pp1_acc64_mbs16_seq2048_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp2_tp8_pp1_acc64_mbs16_seq2048_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..f0790313923c6312903490c118c8ebb902385a40 --- /dev/null +++ b/scripts/run_1.34G_dp2_tp8_pp1_acc64_mbs16_seq2048_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp2_tp8_pp1_acc64_mbs16_seq2048_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=2 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp2_tp8_pp1_acc64_mbs16_seq2048_zero1_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_1.34G_dp32_tp4_pp1_acc2_mbs8_seq8192_zero1_tpmodeALL_vocab131k.sh b/scripts/run_1.34G_dp32_tp4_pp1_acc2_mbs8_seq8192_zero1_tpmodeALL_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..41c8cf6bf15b69a27ffe5a4a340bf2306d9e0c79 --- /dev/null +++ 
b/scripts/run_1.34G_dp32_tp4_pp1_acc2_mbs8_seq8192_zero1_tpmodeALL_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp32_tp4_pp1_acc2_mbs8_seq8192_zero1_tpmodeALL_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp32_tp4_pp1_acc2_mbs8_seq8192_zero1_tpmodeALL_vocab131k.yaml diff --git a/scripts/run_1.34G_dp4_tp128_pp1_acc2_mbs4_seq32768_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp4_tp128_pp1_acc2_mbs4_seq32768_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..f39f24148898b09ca394760037228a9ebd5b28c9 --- /dev/null +++ b/scripts/run_1.34G_dp4_tp128_pp1_acc2_mbs4_seq32768_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp4_tp128_pp1_acc2_mbs4_seq32768_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export 
NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp4_tp128_pp1_acc2_mbs4_seq32768_zero1_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_1.34G_dp4_tp128_pp1_acc32_mbs1_seq8192_zero1_tpmodeALL_vocab131k.sh b/scripts/run_1.34G_dp4_tp128_pp1_acc32_mbs1_seq8192_zero1_tpmodeALL_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..61c2c46ceb080f60188a2f77b31875196d1fcf6a --- /dev/null +++ b/scripts/run_1.34G_dp4_tp128_pp1_acc32_mbs1_seq8192_zero1_tpmodeALL_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp4_tp128_pp1_acc32_mbs1_seq8192_zero1_tpmodeALL_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ 
+ --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp4_tp128_pp1_acc32_mbs1_seq8192_zero1_tpmodeALL_vocab131k.yaml diff --git a/scripts/run_1.34G_dp4_tp128_pp1_acc4_mbs2_seq32768_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp4_tp128_pp1_acc4_mbs2_seq32768_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..6f0ba94fc85c4a520e70f8839c1d2162b29cf945 --- /dev/null +++ b/scripts/run_1.34G_dp4_tp128_pp1_acc4_mbs2_seq32768_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp4_tp128_pp1_acc4_mbs2_seq32768_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp4_tp128_pp1_acc4_mbs2_seq32768_zero1_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_1.34G_dp4_tp4_pp1_acc1_mbs32_seq8192_zero1_tpmodeALL_vocab131k.sh b/scripts/run_1.34G_dp4_tp4_pp1_acc1_mbs32_seq8192_zero1_tpmodeALL_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..c0a7d5151cdfa734a4eb8beb542b9aa874911abe --- /dev/null +++ b/scripts/run_1.34G_dp4_tp4_pp1_acc1_mbs32_seq8192_zero1_tpmodeALL_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp4_tp4_pp1_acc1_mbs32_seq8192_zero1_tpmodeALL_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=2 # Number of nodes (modify as 
needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp4_tp4_pp1_acc1_mbs32_seq8192_zero1_tpmodeALL_vocab131k.yaml diff --git a/scripts/run_1.34G_dp4_tp4_pp1_acc32_mbs2_seq4096_zero0_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp4_tp4_pp1_acc32_mbs2_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..b139844acfb2a8762ecdc89738e169cea20953fa --- /dev/null +++ b/scripts/run_1.34G_dp4_tp4_pp1_acc32_mbs2_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,124 @@ +#!/bin/bash +#SBATCH --job-name=bench_1.34G_dp4_tp4_pp1_acc32_mbs2_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=2 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_1.34G_dp4_tp4_pp1_acc32_mbs2_seq4096_zero0_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_1.34G_dp4_tp4_pp1_acc32_mbs2_seq4096_zero0_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
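+    # Sanity check (assuming the dp*_tp*_pp* filename convention used across
+    # these scripts): dp4 * tp4 * pp1 = 16 ranks, which matches WORLD_SIZE =
+    # 2 nodes x 8 GPUs computed above; acc32 and mbs2 only scale the global
+    # batch per step and do not add processes. A hypothetical guard that could
+    # sit before the srun call:
+    #   test $((4 * 4 * 1)) -eq $WORLD_SIZE || echo "parallelism/world size mismatch"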
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_1.34G_dp4_tp4_pp1_acc32_mbs2_seq4096_zero0_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_1.34G_dp4_tp64_pp1_acc32_mbs16_seq2048_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp4_tp64_pp1_acc32_mbs16_seq2048_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..d5aadcae6e909db4f9eb88e5f0be05a4db6eaa9d --- /dev/null +++ b/scripts/run_1.34G_dp4_tp64_pp1_acc32_mbs16_seq2048_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp4_tp64_pp1_acc32_mbs16_seq2048_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=32 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp4_tp64_pp1_acc32_mbs16_seq2048_zero1_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_1.34G_dp4_tp64_pp1_acc64_mbs2_seq8192_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp4_tp64_pp1_acc64_mbs2_seq8192_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..41ca3ad264f6d91847ddac8eeda164ccc8662e85 --- /dev/null 
+++ b/scripts/run_1.34G_dp4_tp64_pp1_acc64_mbs2_seq8192_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp4_tp64_pp1_acc64_mbs2_seq8192_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=32 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp4_tp64_pp1_acc64_mbs2_seq8192_zero1_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_1.34G_dp4_tp8_pp1_acc16_mbs2_seq32768_zero1_tpmodeALL_vocab131k.sh b/scripts/run_1.34G_dp4_tp8_pp1_acc16_mbs2_seq32768_zero1_tpmodeALL_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..de4b460effa6df876468b22bb1930336b4a19fe8 --- /dev/null +++ b/scripts/run_1.34G_dp4_tp8_pp1_acc16_mbs2_seq32768_zero1_tpmodeALL_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp4_tp8_pp1_acc16_mbs2_seq32768_zero1_tpmodeALL_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=4 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export 
NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp4_tp8_pp1_acc16_mbs2_seq32768_zero1_tpmodeALL_vocab131k.yaml diff --git a/scripts/run_1.34G_dp64_tp4_pp1_acc8_mbs4_seq2048_zero1_tpmodeALL_vocab131k.sh b/scripts/run_1.34G_dp64_tp4_pp1_acc8_mbs4_seq2048_zero1_tpmodeALL_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..e434d45057e5717424ca5437d20e3dc4c0160157 --- /dev/null +++ b/scripts/run_1.34G_dp64_tp4_pp1_acc8_mbs4_seq2048_zero1_tpmodeALL_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp64_tp4_pp1_acc8_mbs4_seq2048_zero1_tpmodeALL_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=32 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + 
--nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp64_tp4_pp1_acc8_mbs4_seq2048_zero1_tpmodeALL_vocab131k.yaml diff --git a/scripts/run_1.34G_dp64_tp8_pp1_acc1_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp64_tp8_pp1_acc1_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..77d75f245ccd4eabc56b49ea0cc4d86344bef7a7 --- /dev/null +++ b/scripts/run_1.34G_dp64_tp8_pp1_acc1_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,124 @@ +#!/bin/bash +#SBATCH --job-name=bench_1.34G_dp64_tp8_pp1_acc1_mbs4_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_1.34G_dp64_tp8_pp1_acc1_mbs4_seq4096_zero1_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_1.34G_dp64_tp8_pp1_acc1_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
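Note on the backgrounded launch above: srun is started with `&` and its PID stored in SRUN_PID, so the salloc shell gets the prompt back immediately. A minimal follow-up sketch (illustrative only, not part of the generated script; it assumes SRUN_PID and OUTPUT_FILE are set exactly as above) that blocks until the run finishes and records its exit status:

    # Wait for the backgrounded srun and propagate its exit code
    wait "$SRUN_PID"
    EXIT_CODE=$?
    echo "srun (PID $SRUN_PID) exited with code $EXIT_CODE" | tee -a "$OUTPUT_FILE"
    exit "$EXIT_CODE"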
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_1.34G_dp64_tp8_pp1_acc1_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_1.34G_dp8_tp16_pp1_acc4_mbs16_seq8192_zero1_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp8_tp16_pp1_acc4_mbs16_seq8192_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..682464a7b9e68d311e43086fae4d799ea6fd6368 --- /dev/null +++ b/scripts/run_1.34G_dp8_tp16_pp1_acc4_mbs16_seq8192_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp8_tp16_pp1_acc4_mbs16_seq8192_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp8_tp16_pp1_acc4_mbs16_seq8192_zero1_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_1.34G_dp8_tp1_pp2_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh b/scripts/run_1.34G_dp8_tp1_pp2_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..8b7eacbe10f5210873a5843dfef8ff50cd91f326 --- /dev/null +++ 
b/scripts/run_1.34G_dp8_tp1_pp2_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_1.34G_dp8_tp1_pp2_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:40:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=2 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_1.34G_dp8_tp1_pp2_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_1.34G_dp8_tp1_pp2_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
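With the NCCL experiments above left commented out, the launch depends only on the allocation matching NNODES x GPUS_PER_NODE and on the c10d rendezvous endpoint being reachable. A small pre-launch sanity check along these lines could catch mismatches before torchrun blocks (a sketch, assuming the MASTER_NODE, NNODES and GPUS_PER_NODE variables defined earlier in the script; it is not emitted by the benchmark generator):

    # Confirm each allocated node actually exposes GPUS_PER_NODE GPUs
    srun --jobid="$SLURM_JOB_ID" --nodes="$NNODES" --ntasks-per-node=1 \
        bash -c 'echo "$(hostname): $(nvidia-smi -L | wc -l) GPUs"'
    # Confirm the rendezvous endpoint resolves before torchrun waits on it
    getent hosts "$MASTER_NODE" >/dev/null || { echo "Cannot resolve $MASTER_NODE" >&2; exit 1; }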
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_1.34G_dp8_tp1_pp2_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_1.34G_dp8_tp2_pp1_acc1_mbs64_seq8192_zero1_tpmodeALL_vocab131k.sh b/scripts/run_1.34G_dp8_tp2_pp1_acc1_mbs64_seq8192_zero1_tpmodeALL_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..5adc154f6c91ca53bf2b2bad5fcbc43960b04627 --- /dev/null +++ b/scripts/run_1.34G_dp8_tp2_pp1_acc1_mbs64_seq8192_zero1_tpmodeALL_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp8_tp2_pp1_acc1_mbs64_seq8192_zero1_tpmodeALL_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=2 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp8_tp2_pp1_acc1_mbs64_seq8192_zero1_tpmodeALL_vocab131k.yaml diff --git a/scripts/run_1.34G_dp8_tp64_pp1_acc16_mbs1_seq8192_zero1_tpmodeALL_vocab131k.sh b/scripts/run_1.34G_dp8_tp64_pp1_acc16_mbs1_seq8192_zero1_tpmodeALL_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..7f369b9470e9bcc3b0845cf0e657daa8bd2c1f58 --- /dev/null +++ 
b/scripts/run_1.34G_dp8_tp64_pp1_acc16_mbs1_seq8192_zero1_tpmodeALL_vocab131k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_1.34G_dp8_tp64_pp1_acc16_mbs1_seq8192_zero1_tpmodeALL_vocab131k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_1.34G_dp8_tp64_pp1_acc16_mbs1_seq8192_zero1_tpmodeALL_vocab131k.yaml diff --git a/scripts/run_3.27G_dp1_tp8_pp1_acc1_mbs1_seq2048_zero0_tpmodeRED_vocab32k.sh b/scripts/run_3.27G_dp1_tp8_pp1_acc1_mbs1_seq2048_zero0_tpmodeRED_vocab32k.sh new file mode 100644 index 0000000000000000000000000000000000000000..1690729b2056665ed9d8e2a1cf43092af4d1f44b --- /dev/null +++ b/scripts/run_3.27G_dp1_tp8_pp1_acc1_mbs1_seq2048_zero0_tpmodeRED_vocab32k.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_3.27G_dp1_tp8_pp1_acc1_mbs1_seq2048_zero0_tpmodeRED_vocab32k # Job name +#SBATCH --time=00:02:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=1 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export 
NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_3.27G_dp1_tp8_pp1_acc1_mbs1_seq2048_zero0_tpmodeRED_vocab32k.yaml diff --git a/scripts/run_3.27G_dp8_tp16_pp4_acc1_mbs1_seq2048_zero0_tpmodeRED_l28_h3072_heads24.sh b/scripts/run_3.27G_dp8_tp16_pp4_acc1_mbs1_seq2048_zero0_tpmodeRED_l28_h3072_heads24.sh new file mode 100644 index 0000000000000000000000000000000000000000..b11169e40a53b2257bc741c2b79cfba46227ba24 --- /dev/null +++ b/scripts/run_3.27G_dp8_tp16_pp4_acc1_mbs1_seq2048_zero0_tpmodeRED_l28_h3072_heads24.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_3.27G_dp8_tp16_pp4_acc1_mbs1_seq2048_zero0_tpmodeRED_l28_h3072_heads24 # Job name +#SBATCH --time=00:15:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using 
srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_3.27G_dp8_tp16_pp4_acc1_mbs1_seq2048_zero0_tpmodeRED_l28_h3072_heads24.yaml diff --git a/scripts/run_3.57G_dp1_tp1_pp8_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k.sh b/scripts/run_3.57G_dp1_tp1_pp8_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..8b73b43654b20805de93060778fdb73144c3b846 --- /dev/null +++ b/scripts/run_3.57G_dp1_tp1_pp8_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,73 @@ +#!/bin/bash + +#SBATCH --job-name=bench_3.57G_dp1_tp1_pp8_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high +#SBATCH --exclude=ip-26-0-160-192,ip-26-0-171-102 + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=1 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +# Disable wandb +export WANDB_MODE=disabled + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + + +# Print GPU topology information +echo "=== GPU Topology ===" +nvidia-smi topo -m +echo "==================" + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp1_tp1_pp8_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_3.57G_dp1_tp2_pp1_acc1_mbs2_seq4096_zero0_tpmodeRED_vocab131k_prof.sh b/scripts/run_3.57G_dp1_tp2_pp1_acc1_mbs2_seq4096_zero0_tpmodeRED_vocab131k_prof.sh new file mode 100644 index 0000000000000000000000000000000000000000..3baccb20ee9d80d32457351a78cc03b9d0c20688 --- /dev/null +++ b/scripts/run_3.57G_dp1_tp2_pp1_acc1_mbs2_seq4096_zero0_tpmodeRED_vocab131k_prof.sh @@ -0,0 +1,161 @@ +#!/bin/bash +#SBATCH --job-name=bench_3.57G_dp1_tp2_pp1_acc1_mbs2_seq4096_zero0_tpmodeRED_vocab131k_prof # Job name +#SBATCH 
--time=00:40:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=1 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e +echo "Running script: $0" + + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=2 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_3.57G_dp1_tp2_pp1_acc1_mbs2_seq4096_zero0_tpmodeRED_vocab131k_prof" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp1_tp2_pp1_acc1_mbs2_seq4096_zero0_tpmodeRED_vocab131k_prof.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp1_tp2_pp1_acc1_mbs2_seq4096_zero0_tpmodeRED_vocab131k_prof.yaml +fi diff --git a/scripts/run_3.57G_dp1_tp32_pp2_acc32_mbs8_seq4096_zero0_tpmodeRED_vocab131k.sh b/scripts/run_3.57G_dp1_tp32_pp2_acc32_mbs8_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..489fcdcac258a48341b5f6235674d5dc17e1b71c --- /dev/null +++ b/scripts/run_3.57G_dp1_tp32_pp2_acc32_mbs8_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_3.57G_dp1_tp32_pp2_acc32_mbs8_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:40:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=8 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_3.57G_dp1_tp32_pp2_acc32_mbs8_seq4096_zero0_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp1_tp32_pp2_acc32_mbs8_seq4096_zero0_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp1_tp32_pp2_acc32_mbs8_seq4096_zero0_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_3.57G_dp2_tp4_pp2_acc1_mbs128_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_3.57G_dp2_tp4_pp2_acc1_mbs128_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..91c31d761ca04a8172309c1419978daae4216df4 --- /dev/null +++ b/scripts/run_3.57G_dp2_tp4_pp2_acc1_mbs128_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_3.57G_dp2_tp4_pp2_acc1_mbs128_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=2 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=INFO # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_3.57G_dp2_tp4_pp2_acc1_mbs128_seq4096_zero1_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp2_tp4_pp2_acc1_mbs128_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp2_tp4_pp2_acc1_mbs128_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_3.57G_dp64_tp1_pp2_acc1_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_3.57G_dp64_tp1_pp2_acc1_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..51ac98fc97f63e1e4649bbfad0b307df53f2e0a7 --- /dev/null +++ b/scripts/run_3.57G_dp64_tp1_pp2_acc1_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_3.57G_dp64_tp1_pp2_acc1_mbs4_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=INFO # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_3.57G_dp64_tp1_pp2_acc1_mbs4_seq4096_zero1_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp64_tp1_pp2_acc1_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp64_tp1_pp2_acc1_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_3.57G_dp64_tp4_pp2_acc2_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_3.57G_dp64_tp4_pp2_acc2_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..f5aeafab96620d1051870906eca8bb4f60ec45c0 --- /dev/null +++ b/scripts/run_3.57G_dp64_tp4_pp2_acc2_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_3.57G_dp64_tp4_pp2_acc2_mbs2_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=INFO # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC
+
+# # Match bandwidth patterns
+# export NCCL_MAX_NCHANNELS=2
+# export NCCL_MIN_NCHANNELS=2
+
+
+# export NCCL_NET_GDR_LEVEL=LOC # Disable GPUDirect RDMA (GDR)
+# export NCCL_SHM_DISABLE=0 # 0 keeps the Shared Memory (SHM) transport enabled; set to 1 to disable it
+# export NCCL_IB_DISABLE=0 # 0 keeps the InfiniBand (IB) transport enabled; set to 1 to disable it
+# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds, 21 = ~8 seconds, 22 = ~16 seconds
+# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well
+
+# Force SHM
+# export NCCL_NET_PLUGIN=none # fixes hang but doesn't work multinode
+# export NCCL_SOCKET_NTHREADS=1
+# export FI_PROVIDER="tcp"
+
+# Print GPU topology information
+if [ -z "${SALLOC_MODE}" ]; then
+    echo "=== GPU Topology ==="
+    nvidia-smi topo -m
+    echo "=================="
+    export SRUN_ALLOC_ARGS=""
+else
+    export JOBNAME="bench_3.57G_dp64_tp4_pp2_acc2_mbs2_seq4096_zero1_tpmodeRED_vocab131k"
+    export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out"
+    export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME"
+fi
+
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun in background
+if [ -n "${SALLOC_MODE}" ]; then # srun mode
+    srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    --max_restarts 0 \
+    --rdzv_conf timeout=60 \
+    /fsx/nouamane/projects/nanotron/run_train.py \
+    --config-file benchmark/configs/config_3.57G_dp64_tp4_pp2_acc2_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 &
+    # Store the process ID
+    SRUN_PID=$!
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp64_tp4_pp2_acc2_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_3.57G_dp8_tp16_pp1_acc4_mbs8_seq4096_zero0_tpmodeRED_vocab131k.sh b/scripts/run_3.57G_dp8_tp16_pp1_acc4_mbs8_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..5ec7583e6a332f59e8fb353216e77d1c9625fdf0 --- /dev/null +++ b/scripts/run_3.57G_dp8_tp16_pp1_acc4_mbs8_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,124 @@ +#!/bin/bash +#SBATCH --job-name=bench_3.57G_dp8_tp16_pp1_acc4_mbs8_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_3.57G_dp8_tp16_pp1_acc4_mbs8_seq4096_zero0_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp8_tp16_pp1_acc4_mbs8_seq4096_zero0_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_3.57G_dp8_tp16_pp1_acc4_mbs8_seq4096_zero0_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_4.33G_dp128_tp1_pp1_acc4_mbs2_seq4096_zero0_l28_h3072_heads20.sh b/scripts/run_4.33G_dp128_tp1_pp1_acc4_mbs2_seq4096_zero0_l28_h3072_heads20.sh new file mode 100644 index 0000000000000000000000000000000000000000..bc796b03ffc7e8a73272d7a510f53c9b28c88f4a --- /dev/null +++ b/scripts/run_4.33G_dp128_tp1_pp1_acc4_mbs2_seq4096_zero0_l28_h3072_heads20.sh @@ -0,0 +1,57 @@ +#!/bin/bash + +#SBATCH --job-name=bench_4.33G_dp128_tp1_pp1_acc4_mbs2_seq4096_zero0_l28_h3072_heads20 # Job name +#SBATCH --time=00:15:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_4.33G_dp128_tp1_pp1_acc4_mbs2_seq4096_zero0_l28_h3072_heads20.yaml diff --git a/scripts/run_469G_dp4_tp16_pp2_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_469G_dp4_tp16_pp2_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..fe6076f443df1f5b7002565303f57652641b188b --- /dev/null +++ b/scripts/run_469G_dp4_tp16_pp2_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_469G_dp4_tp16_pp2_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o 
/fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=INFO # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC
+
+# # Match bandwidth patterns
+# export NCCL_MAX_NCHANNELS=2
+# export NCCL_MIN_NCHANNELS=2
+
+
+# export NCCL_NET_GDR_LEVEL=LOC # Disable GPUDirect RDMA (GDR)
+# export NCCL_SHM_DISABLE=0 # 0 keeps the Shared Memory (SHM) transport enabled; set to 1 to disable it
+# export NCCL_IB_DISABLE=0 # 0 keeps the InfiniBand (IB) transport enabled; set to 1 to disable it
+# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds, 21 = ~8 seconds, 22 = ~16 seconds
+# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well
+
+# Force SHM
+# export NCCL_NET_PLUGIN=none # fixes hang but doesn't work multinode
+# export NCCL_SOCKET_NTHREADS=1
+# export FI_PROVIDER="tcp"
+
+# Print GPU topology information
+if [ -z "${SALLOC_MODE}" ]; then
+    echo "=== GPU Topology ==="
+    nvidia-smi topo -m
+    echo "=================="
+    export SRUN_ALLOC_ARGS=""
+else
+    export JOBNAME="bench_469G_dp4_tp16_pp2_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k"
+    export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out"
+    export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME"
+fi
+
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun in background
+if [ -n "${SALLOC_MODE}" ]; then # srun mode
+    srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    --max_restarts 0 \
+    --rdzv_conf timeout=60 \
+    /fsx/nouamane/projects/nanotron/run_train.py \
+    --config-file benchmark/configs/config_469G_dp4_tp16_pp2_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 &
+    # Store the process ID
+    SRUN_PID=$!
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_469G_dp4_tp16_pp2_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_469G_dp4_tp16_pp2_acc2_mbs32_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_469G_dp4_tp16_pp2_acc2_mbs32_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..409e8b27e17c44c2d87506af19af986f66ea0757 --- /dev/null +++ b/scripts/run_469G_dp4_tp16_pp2_acc2_mbs32_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_469G_dp4_tp16_pp2_acc2_mbs32_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=INFO # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC
+
+# # Match bandwidth patterns
+# export NCCL_MAX_NCHANNELS=2
+# export NCCL_MIN_NCHANNELS=2
+
+
+# export NCCL_NET_GDR_LEVEL=LOC # Disable GPUDirect RDMA (GDR)
+# export NCCL_SHM_DISABLE=0 # 0 keeps the Shared Memory (SHM) transport enabled; set to 1 to disable it
+# export NCCL_IB_DISABLE=0 # 0 keeps the InfiniBand (IB) transport enabled; set to 1 to disable it
+# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds, 21 = ~8 seconds, 22 = ~16 seconds
+# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well
+
+# Force SHM
+# export NCCL_NET_PLUGIN=none # fixes hang but doesn't work multinode
+# export NCCL_SOCKET_NTHREADS=1
+# export FI_PROVIDER="tcp"
+
+# Print GPU topology information
+if [ -z "${SALLOC_MODE}" ]; then
+    echo "=== GPU Topology ==="
+    nvidia-smi topo -m
+    echo "=================="
+    export SRUN_ALLOC_ARGS=""
+else
+    export JOBNAME="bench_469G_dp4_tp16_pp2_acc2_mbs32_seq4096_zero1_tpmodeRED_vocab131k"
+    export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out"
+    export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME"
+fi
+
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun in background
+if [ -n "${SALLOC_MODE}" ]; then # srun mode
+    srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    --max_restarts 0 \
+    --rdzv_conf timeout=60 \
+    /fsx/nouamane/projects/nanotron/run_train.py \
+    --config-file benchmark/configs/config_469G_dp4_tp16_pp2_acc2_mbs32_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 &
+    # Store the process ID
+    SRUN_PID=$!
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_469G_dp4_tp16_pp2_acc2_mbs32_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_469G_dp4_tp2_pp2_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_469G_dp4_tp2_pp2_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..8d7920207ece7f6831662d5b89615334e3ec43a8 --- /dev/null +++ b/scripts/run_469G_dp4_tp2_pp2_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_469G_dp4_tp2_pp2_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=2 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=INFO # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC
+
+# # Match bandwidth patterns
+# export NCCL_MAX_NCHANNELS=2
+# export NCCL_MIN_NCHANNELS=2
+
+
+# export NCCL_NET_GDR_LEVEL=LOC # Disable GPUDirect RDMA (GDR)
+# export NCCL_SHM_DISABLE=0 # 0 keeps the Shared Memory (SHM) transport enabled; set to 1 to disable it
+# export NCCL_IB_DISABLE=0 # 0 keeps the InfiniBand (IB) transport enabled; set to 1 to disable it
+# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds, 21 = ~8 seconds, 22 = ~16 seconds
+# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well
+
+# Force SHM
+# export NCCL_NET_PLUGIN=none # fixes hang but doesn't work multinode
+# export NCCL_SOCKET_NTHREADS=1
+# export FI_PROVIDER="tcp"
+
+# Print GPU topology information
+if [ -z "${SALLOC_MODE}" ]; then
+    echo "=== GPU Topology ==="
+    nvidia-smi topo -m
+    echo "=================="
+    export SRUN_ALLOC_ARGS=""
+else
+    export JOBNAME="bench_469G_dp4_tp2_pp2_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k"
+    export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out"
+    export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME"
+fi
+
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun in background
+if [ -n "${SALLOC_MODE}" ]; then # srun mode
+    srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    --max_restarts 0 \
+    --rdzv_conf timeout=60 \
+    /fsx/nouamane/projects/nanotron/run_train.py \
+    --config-file benchmark/configs/config_469G_dp4_tp2_pp2_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 &
+    # Store the process ID
+    SRUN_PID=$!
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_469G_dp4_tp2_pp2_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_5.5G_dp64_tp2_pp4_acc1_mbs1_seq2048_zero0_tpmodeRED_l32_h4096_heads32.sh b/scripts/run_5.5G_dp64_tp2_pp4_acc1_mbs1_seq2048_zero0_tpmodeRED_l32_h4096_heads32.sh new file mode 100644 index 0000000000000000000000000000000000000000..f1e336048b9f080ca1171124624f755ad09f9624 --- /dev/null +++ b/scripts/run_5.5G_dp64_tp2_pp4_acc1_mbs1_seq2048_zero0_tpmodeRED_l32_h4096_heads32.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +#SBATCH --job-name=bench_5.5G_dp64_tp2_pp4_acc1_mbs1_seq2048_zero0_tpmodeRED_l32_h4096_heads32 # Job name +#SBATCH --time=00:15:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=64 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# # Disable EFA by changing the provider to tcp +# export FI_PROVIDER=tcp + +# # Optionally, you can also unset these EFA-related variables +# unset FI_EFA_FORK_SAFE +# unset FI_EFA_ENABLE_SHM_TRANSFER + +# # If you want to ensure NCCL uses TCP +# export NCCL_IB_DISABLE=1 +# export NCCL_SOCKET_IFNAME=eth0 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_5.5G_dp64_tp2_pp4_acc1_mbs1_seq2048_zero0_tpmodeRED_l32_h4096_heads32.yaml diff --git a/scripts/run_8.86G_dp2_tp8_pp2_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh b/scripts/run_8.86G_dp2_tp8_pp2_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 
0000000000000000000000000000000000000000..7b73ecdbf80983f2e64c768e59e2527b0eeeffc2 --- /dev/null +++ b/scripts/run_8.86G_dp2_tp8_pp2_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_8.86G_dp2_tp8_pp2_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:40:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=4 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC
+
+# # Match bandwidth patterns
+# export NCCL_MAX_NCHANNELS=2
+# export NCCL_MIN_NCHANNELS=2
+
+
+# export NCCL_NET_GDR_LEVEL=LOC # Disable GPUDirect RDMA (GDR)
+# export NCCL_SHM_DISABLE=0 # 0 keeps the Shared Memory (SHM) transport enabled; set to 1 to disable it
+# export NCCL_IB_DISABLE=0 # 0 keeps the InfiniBand (IB) transport enabled; set to 1 to disable it
+# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds, 21 = ~8 seconds, 22 = ~16 seconds
+# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well
+
+# Force SHM
+# export NCCL_NET_PLUGIN=none # fixes hang but doesn't work multinode
+# export NCCL_SOCKET_NTHREADS=1
+# export FI_PROVIDER="tcp"
+
+# Print GPU topology information
+if [ -z "${SALLOC_MODE}" ]; then
+    echo "=== GPU Topology ==="
+    nvidia-smi topo -m
+    echo "=================="
+    export SRUN_ALLOC_ARGS=""
+else
+    export JOBNAME="bench_8.86G_dp2_tp8_pp2_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k"
+    export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out"
+    export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME"
+fi
+
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun in background
+if [ -n "${SALLOC_MODE}" ]; then # srun mode
+    srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    --max_restarts 0 \
+    --rdzv_conf timeout=60 \
+    /fsx/nouamane/projects/nanotron/run_train.py \
+    --config-file benchmark/configs/config_8.86G_dp2_tp8_pp2_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 &
+    # Store the process ID
+    SRUN_PID=$!
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_8.86G_dp2_tp8_pp2_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_8.86G_dp4_tp4_pp2_acc4_mbs16_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_8.86G_dp4_tp4_pp2_acc4_mbs16_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..d62712e6074ceed92f5f05217a13a608595c169b --- /dev/null +++ b/scripts/run_8.86G_dp4_tp4_pp2_acc4_mbs16_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_8.86G_dp4_tp4_pp2_acc4_mbs16_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=4 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=INFO # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC
+
+# # Match bandwidth patterns
+# export NCCL_MAX_NCHANNELS=2
+# export NCCL_MIN_NCHANNELS=2
+
+
+# export NCCL_NET_GDR_LEVEL=LOC # Disable GPUDirect RDMA (GDR)
+# export NCCL_SHM_DISABLE=0 # 0 keeps the Shared Memory (SHM) transport enabled; set to 1 to disable it
+# export NCCL_IB_DISABLE=0 # 0 keeps the InfiniBand (IB) transport enabled; set to 1 to disable it
+# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds, 21 = ~8 seconds, 22 = ~16 seconds
+# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well
+
+# Force SHM
+# export NCCL_NET_PLUGIN=none # fixes hang but doesn't work multinode
+# export NCCL_SOCKET_NTHREADS=1
+# export FI_PROVIDER="tcp"
+
+# Print GPU topology information
+if [ -z "${SALLOC_MODE}" ]; then
+    echo "=== GPU Topology ==="
+    nvidia-smi topo -m
+    echo "=================="
+    export SRUN_ALLOC_ARGS=""
+else
+    export JOBNAME="bench_8.86G_dp4_tp4_pp2_acc4_mbs16_seq4096_zero1_tpmodeRED_vocab131k"
+    export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out"
+    export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME"
+fi
+
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun in background
+if [ -n "${SALLOC_MODE}" ]; then # srun mode
+    srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    --max_restarts 0 \
+    --rdzv_conf timeout=60 \
+    /fsx/nouamane/projects/nanotron/run_train.py \
+    --config-file benchmark/configs/config_8.86G_dp4_tp4_pp2_acc4_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 &
+    # Store the process ID
+    SRUN_PID=$!
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_8.86G_dp4_tp4_pp2_acc4_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_8.86G_dp64_tp2_pp1_acc4_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh b/scripts/run_8.86G_dp64_tp2_pp1_acc4_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..a563265c0c35ba5ff1fd1f89ebc0a47949f2c660 --- /dev/null +++ b/scripts/run_8.86G_dp64_tp2_pp1_acc4_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,124 @@ +#!/bin/bash +#SBATCH --job-name=bench_8.86G_dp64_tp2_pp1_acc4_mbs1_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_8.86G_dp64_tp2_pp1_acc4_mbs1_seq4096_zero0_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_8.86G_dp64_tp2_pp1_acc4_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$! 
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_8.86G_dp64_tp2_pp1_acc4_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_80G_dp1_tp1_pp128_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k.sh b/scripts/run_80G_dp1_tp1_pp128_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..ad035d92e683a6f8281594393932413d6f816d5d --- /dev/null +++ b/scripts/run_80G_dp1_tp1_pp128_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,73 @@ +#!/bin/bash + +#SBATCH --job-name=bench_80G_dp1_tp1_pp128_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high +#SBATCH --exclude=ip-26-0-160-192,ip-26-0-171-102 + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +# Disable wandb +export WANDB_MODE=disabled + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + + +# Print GPU topology information +echo "=== GPU Topology ===" +nvidia-smi topo -m +echo "==================" + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_80G_dp1_tp1_pp128_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_80G_dp1_tp2_pp4_acc1_mbs256_seq4096_zero0_tpmodeRED_vocab131k.sh 
b/scripts/run_80G_dp1_tp2_pp4_acc1_mbs256_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..9db5c618781f4c8135f923c42d05a5bee08dbb8b --- /dev/null +++ b/scripts/run_80G_dp1_tp2_pp4_acc1_mbs256_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,73 @@ +#!/bin/bash + +#SBATCH --job-name=bench_80G_dp1_tp2_pp4_acc1_mbs256_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high +#SBATCH --exclude=ip-26-0-160-192,ip-26-0-171-102 + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=1 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +# Disable wandb +export WANDB_MODE=disabled + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + + +# Print GPU topology information +echo "=== GPU Topology ===" +nvidia-smi topo -m +echo "==================" + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_80G_dp1_tp2_pp4_acc1_mbs256_seq4096_zero0_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_80G_dp1_tp2_pp4_acc32_mbs8_seq4096_zero0_tpmodeRED_vocab131k.sh b/scripts/run_80G_dp1_tp2_pp4_acc32_mbs8_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..d6fcb93601705aa6dee26549e047d114e0b95a3e --- /dev/null +++ b/scripts/run_80G_dp1_tp2_pp4_acc32_mbs8_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,73 @@ +#!/bin/bash + +#SBATCH --job-name=bench_80G_dp1_tp2_pp4_acc32_mbs8_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high +#SBATCH --exclude=ip-26-0-160-192,ip-26-0-171-102 + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=1 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of 
nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=WARN # INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +# Disable wandb +export WANDB_MODE=disabled + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + + +# Print GPU topology information +echo "=== GPU Topology ===" +nvidia-smi topo -m +echo "==================" + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_80G_dp1_tp2_pp4_acc32_mbs8_seq4096_zero0_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_80G_dp1_tp32_pp4_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k.sh b/scripts/run_80G_dp1_tp32_pp4_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..f4593bc7880a057d1b70f69bf18eca3fd5749078 --- /dev/null +++ b/scripts/run_80G_dp1_tp32_pp4_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k.sh @@ -0,0 +1,73 @@ +#!/bin/bash + +#SBATCH --job-name=bench_80G_dp1_tp32_pp4_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k # Job name +#SBATCH --time=00:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high +#SBATCH --exclude=ip-26-0-160-192,ip-26-0-171-102 + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 
+export NCCL_DEBUG=WARN # INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +# Disable wandb +export WANDB_MODE=disabled + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + + +# Print GPU topology information +echo "=== GPU Topology ===" +nvidia-smi topo -m +echo "==================" + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_80G_dp1_tp32_pp4_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k.yaml diff --git a/scripts/run_80G_dp2_tp4_pp16_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh b/scripts/run_80G_dp2_tp4_pp16_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh new file mode 100644 index 0000000000000000000000000000000000000000..fe54e6598450445fdb50054b8e5b103dac5abdac --- /dev/null +++ b/scripts/run_80G_dp2_tp4_pp16_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh @@ -0,0 +1,159 @@ +#!/bin/bash +#SBATCH --job-name=bench_80G_dp2_tp4_pp16_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k # Job name +#SBATCH --time=01:10:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=16 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes +#SBATCH --wait-all-nodes=1 # fail if any node is not ready + +# run using +# sbatch --nodes=1 run_multinode.sh +# or +# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh + +set -x -e + +# If not running under SLURM, set default SLURM environment variables +if [ -z "${SLURM_JOB_ID}" ]; then + if [ -z "${SALLOC_JOBID}" ]; then + echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session." + exit 1 + fi + if [ -z "${NNODES}" ]; then + echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session." 
+ exit 1 + fi + export SALLOC_MODE=1 + export SLURM_JOB_ID=$SALLOC_JOBID + export SLURM_NNODES=$NNODES + export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N") +fi + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 +# Unset FI_PROVIDER to avoid potential libfabric provider issues +# unset FI_PROVIDER + + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +if [ -z "${SALLOC_MODE}" ]; then # sbatch mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` + +else # srun mode + export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES` +fi +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=INFO # INFO, WARN +# export NCCL_DEBUG_SUBSYS=ALL +# export CUDA_LAUNCH_BLOCKING=1 + +# Nanotron specific +export NANOTRON_BENCHMARK=1 +export WANDB_MODE=disabled + +# export TORCH_NCCL_USE_COMM_NONBLOCKING=1 + +# Trying to avoid hangs +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 + +# debug +export TORCH_DISTRIBUTED_DEBUG=DETAIL + +# export NCCL_P2P_LEVEL=NVL +# export CUDA_LAUNCH_BLOCKING=1 +# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA +# export NCCL_NET_GDR_LEVEL=LOC +# Test Script - save as test_comm.sh + +# Test 1 - Force TCP +# echo "Running with TCP only..." 
+# export NCCL_P2P_LEVEL=LOC + +# # Match bandwidth patterns +# export NCCL_MAX_NCHANNELS=2 +# export NCCL_MIN_NCHANNELS=2 + + +# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA +# export NCCL_SHM_DISABLE=0 # set to 1 to disable the Shared Memory (SHM) transport +# export NCCL_IB_DISABLE=0 # set to 1 to disable the InfiniBand (IB) transport +# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds, 21 = ~8 seconds, 22 = ~16 seconds +# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well + +# Force SHM +# export NCCL_NET_PLUGIN=none # fixes hang but doesn't work multi-node +# export NCCL_SOCKET_NTHREADS=1 +# export FI_PROVIDER="tcp" + +# Print GPU topology information +if [ -z "${SALLOC_MODE}" ]; then + echo "=== GPU Topology ===" + nvidia-smi topo -m + echo "==================" + export SRUN_ALLOC_ARGS="" +else + export JOBNAME="bench_80G_dp2_tp4_pp16_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k" + export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out" + export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME" +fi + + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun in background +if [ -n "${SALLOC_MODE}" ]; then # srun mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_80G_dp2_tp4_pp16_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 & + # Store the process ID + SRUN_PID=$!
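+ # NOTE (editor's sketch, not part of the original script): common ways to watch the backgrounded srun:
+ #   tail -f "$OUTPUT_FILE"                      # follow the training log live
+ #   wait $SRUN_PID; echo "srun exited with $?"  # block in this shell until the run finishes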
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE + + # Optionally, you can add: + echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE + echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE + +else # sbatch mode + srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + --max_restarts 0 \ + --rdzv_conf timeout=60 \ + /fsx/nouamane/projects/nanotron/run_train.py \ + --config-file benchmark/configs/config_80G_dp2_tp4_pp16_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml +fi diff --git a/scripts/run_dp16_tp1_pp1_acc1_mbs8_seq2048.sh b/scripts/run_dp16_tp1_pp1_acc1_mbs8_seq2048.sh new file mode 100644 index 0000000000000000000000000000000000000000..89cc0636f4c057e068d16517fa2cc2fa80a86a11 --- /dev/null +++ b/scripts/run_dp16_tp1_pp1_acc1_mbs8_seq2048.sh @@ -0,0 +1,57 @@ +#!/bin/bash + +#SBATCH --job-name=bench_dp16_tp1_pp1_acc1_mbs8_seq2048 # Job name +#SBATCH --time=00:15:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%x-%j.out + +#SBATCH --nodes=2 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=80 # CPU cores per task +#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_dp16_tp1_pp1_acc1_mbs8_seq2048.yaml diff --git a/scripts/run_dp8_tp1_pp1_acc1_mbs1_seq4096_zero0_l12_h1024_heads16.sh b/scripts/run_dp8_tp1_pp1_acc1_mbs1_seq4096_zero0_l12_h1024_heads16.sh new file mode 100644 index 0000000000000000000000000000000000000000..10937e39142a378e30b660460a1d1732ed32d343 --- /dev/null +++ b/scripts/run_dp8_tp1_pp1_acc1_mbs1_seq4096_zero0_l12_h1024_heads16.sh @@ -0,0 +1,57 @@ +#!/bin/bash + +#SBATCH --job-name=bench_dp8_tp1_pp1_acc1_mbs1_seq4096_zero0_l12_h1024_heads16 # Job name +#SBATCH --time=00:15:00 +#SBATCH --partition=hopper-prod +#SBATCH --qos=high + +#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out + +#SBATCH --nodes=1 # Number of nodes (modify as needed) +#SBATCH --ntasks-per-node=1 # Number of tasks per node +#SBATCH --cpus-per-task=60 # CPU cores per task 
+#SBATCH --gres=gpu:8 # Number of GPUs per node +#SBATCH --exclusive # Exclusive use of nodes + +set -x -e + +# Load any necessary modules for your system +source /etc/profile.d/modules.sh # for some reason module isn't loaded +module load cuda/12.1 + +# Activate your conda environment if needed +source /fsx/nouamane/miniconda/bin/activate +conda activate 2-1-cu121 +export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH + +# Get the node names from SLURM +export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST` +export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1` +export MASTER_PORT=12356 + +# Calculate total number of processes +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE)) + +# Set some environment variables for better distributed training +export CUDA_DEVICE_MAX_CONNECTIONS=1 +# export NCCL_DEBUG=INFO + +# Nanotron specific +export NANOTRON_BENCHMARK=1 + +# Print some debugging information +echo "Master node: $MASTER_NODE" +echo "All nodes: $NODELIST" +echo "World size: $WORLD_SIZE" + +# Launch the training script using srun +srun torchrun \ + --nnodes=$NNODES \ + --nproc_per_node=$GPUS_PER_NODE \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \ + run_train.py \ + --config-file benchmark/configs/config_dp8_tp1_pp1_acc1_mbs1_seq4096_zero0_l12_h1024_heads16.yaml
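The scripts above all follow the same pattern: one sbatch file per parallelism configuration, each pointing at a matching YAML under benchmark/configs/. Below is a minimal submission helper sketch; it is not part of the diff, and the loop, the config-existence check, and the one-second throttle are assumptions about how such a sweep might be driven from the repository root.

#!/bin/bash
# Hypothetical helper (not in the patch): submit every benchmark script whose
# config file exists. Assumes it runs from the nanotron repo root, where the
# scripts/ and benchmark/configs/ directories added above live.
set -euo pipefail

for script in scripts/run_*.sh; do
    # run_<name>.sh is expected to reference benchmark/configs/config_<name>.yaml
    name=$(basename "$script" .sh)                     # e.g. run_80G_dp1_tp2_pp4_...
    config="benchmark/configs/config_${name#run_}.yaml"
    if [ ! -f "$config" ]; then
        echo "Skipping $script (missing $config)" >&2
        continue
    fi
    sbatch "$script"
    sleep 1   # small delay so the scheduler isn't flooded with submissions
done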