Muennighoff commited on
Commit
1751082
·
1 Parent(s): 1a09c78

Add TB sbatch

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. sbatch_4b284b84b10c4pyseed1.sh +165 -0
  2. sbatch_4b284b84b10c4pyseed2.sh +165 -0
  3. sbatch_4b284b84b10c4pyseed3.sh +165 -0
  4. sbatch_4b284b84b10c4pyseed4.sh +165 -0
  5. sbatch_4b284b84b20c4pyseed1.sh +165 -0
  6. sbatch_4b284b84b20c4pyseed2.sh +165 -0
  7. sbatch_4b284b84b20c4pyseed3.sh +165 -0
  8. sbatch_4b284b84b20c4pyseed4.sh +165 -0
  9. sbatch_4b284b84b30c4pyseed1.sh +165 -0
  10. sbatch_4b284b84b30c4pyseed2.sh +165 -0
  11. sbatch_4b284b84b30c4pyseed3.sh +165 -0
  12. sbatch_4b284b84b30c4pyseed4.sh +165 -0
  13. sbatch_4b284b84b40c4pyseed1.sh +165 -0
  14. sbatch_4b284b84b40c4pyseed2.sh +165 -0
  15. sbatch_4b284b84b40c4pyseed3.sh +165 -0
  16. sbatch_4b284b84b40c4pyseed4.sh +165 -0
  17. sbatch_4b284b84b50c4pyseed1.sh +165 -0
  18. sbatch_4b284b84b50c4pyseed2.sh +165 -0
  19. sbatch_4b284b84b50c4pyseed3.sh +165 -0
  20. sbatch_4b284b84b50c4pyseed4.sh +165 -0
  21. sbatch_4b284b84b60c4pyseed1.sh +164 -0
  22. sbatch_4b284b84b60c4pyseed2.sh +164 -0
  23. sbatch_4b284b84b60c4pyseed3.sh +164 -0
  24. sbatch_4b284b84b60c4pyseed4.sh +164 -0
  25. sbatch_4b284b84b70c4pyseed1.sh +164 -0
  26. sbatch_4b284b84b70c4pyseed2.sh +164 -0
  27. sbatch_4b284b84b70c4pyseed3.sh +164 -0
  28. sbatch_4b284b84b70c4pyseed4.sh +164 -0
  29. sbatch_4b284b84b80c4pyseed1.sh +164 -0
  30. sbatch_4b284b84b80c4pyseed2.sh +164 -0
  31. sbatch_4b284b84b80c4pyseed3.sh +164 -0
  32. sbatch_4b284b84b80c4pyseed4.sh +164 -0
  33. sbatch_4b284b84b90c4pyseed1.sh +164 -0
  34. sbatch_4b284b84b90c4pyseed2.sh +164 -0
  35. sbatch_4b284b84b90c4pyseed3.sh +164 -0
  36. sbatch_4b284b84b90c4pyseed4.sh +164 -0
  37. tensorboard/tensorboard_4b284b84b10c4pyseed1/events.out.tfevents.1683756022.nid007048.61333.0 +3 -0
  38. tensorboard/tensorboard_4b284b84b10c4pyseed1/events.out.tfevents.1683756633.nid006995.89119.0 +3 -0
  39. tensorboard/tensorboard_4b284b84b10c4pyseed1/events.out.tfevents.1683928600.nid007131.83743.0 +3 -0
  40. tensorboard/tensorboard_4b284b84b10c4pyseed1/events.out.tfevents.1683974921.nid006671.116485.0 +3 -0
  41. tensorboard/tensorboard_4b284b84b10c4pyseed1/events.out.tfevents.1683978316.nid006500.80744.0 +3 -0
  42. tensorboard/tensorboard_4b284b84b10c4pyseed2/events.out.tfevents.1683756022.nid005878.51908.0 +3 -0
  43. tensorboard/tensorboard_4b284b84b10c4pyseed2/events.out.tfevents.1683928065.nid006518.16895.0 +3 -0
  44. tensorboard/tensorboard_4b284b84b10c4pyseed2/events.out.tfevents.1683973531.nid007019.108443.0 +3 -0
  45. tensorboard/tensorboard_4b284b84b10c4pyseed2/events.out.tfevents.1683978316.nid006848.10586.0 +3 -0
  46. tensorboard/tensorboard_4b284b84b10c4pyseed3/events.out.tfevents.1683756022.nid006518.29112.0 +3 -0
  47. tensorboard/tensorboard_4b284b84b10c4pyseed3/events.out.tfevents.1683928065.nid005878.6854.0 +3 -0
  48. tensorboard/tensorboard_4b284b84b10c4pyseed3/events.out.tfevents.1683974293.nid006598.20858.0 +3 -0
  49. tensorboard/tensorboard_4b284b84b10c4pyseed3/events.out.tfevents.1683978316.nid007019.25418.0 +3 -0
  50. tensorboard/tensorboard_4b284b84b10c4pyseed4/events.out.tfevents.1683756022.nid006586.32254.0 +3 -0
sbatch_4b284b84b10c4pyseed1.sh ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ #SBATCH --exclude=nid007571,nid007112,nid006774,nid007502,nid007506,nid007507,nid005145,nid006692,nid007218,nid007123,nid006124,nid006123,nid007496,nid007237,nid006852,nid007206,nid006947,nid007212,nid006977,nid007222,nid005444,nid007219,nid007493,nid007221,nid005300,nid005619,nid006118,nid005203,nid006113,nid006481,nid007077,nid005208,nid005207,nid005879,nid005901
3
+ #SBATCH --nodes=32
4
+ #SBATCH --ntasks-per-node=1
5
+ #SBATCH --cpus-per-task=40
6
+ #SBATCH --mem=256G
7
+ #SBATCH -p standard-g
8
+ #SBATCH -t 48:00:00
9
+ #SBATCH --gpus-per-node=mi250:8
10
+ #SBATCH --exclusive=user
11
+ #SBATCH --hint=nomultithread
12
+ #SBATCH --account=project_462000119
13
+ #SBATCH -o logs/%j.out
14
+ #SBATCH -e logs/%j.err
15
+
16
+ VARIANT=4b284b84b10c4pyseed1
17
+
18
+ # if run without sbatch, invoke here
19
+ if [ -z $SLURM_JOB_ID ]; then
20
+ mkdir -p logs
21
+ sbatch "$0"
22
+ exit
23
+ fi
24
+
25
+ set -euo pipefail
26
+
27
+ # symlink logs/latest.out and logs/latest.err
28
+ ln -f -s $SLURM_JOB_ID.out logs/latest.out
29
+ ln -f -s $SLURM_JOB_ID.err logs/latest.err
30
+
31
+ KILL_SWITCH_PATH=kill-switch-$VARIANT
32
+ CHECKPOINT_PATH=checkpoints_$VARIANT
33
+ TENSORBOARD_PATH=tensorboard_$VARIANT
34
+
35
+ # Data
36
+ VOCAB_FILE="gpt2/vocab.json"
37
+ MERGE_FILE="gpt2/merges.txt"
38
+
39
+ TRAIN_DATA_PATH=train84b10c4py.txt
40
+ # "train: 0.1 0:1 /scratch/project_462000119/data/c4_subsampled/gpt2tok_c4_en_84B_text_document, 0.9 0:1 /scratch/project_462000119/data/python/gpt2tok_python_84B_content_document"
41
+ VALID_DATA_PATH=valc4py.txt
42
+ # "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document" "validation_python: 1.0 0.95:1 /scratch/project_462000119/data/python/gpt2tok_python_content_document"
43
+
44
+
45
+ PP_SIZE=1
46
+ TP_SIZE=2
47
+
48
+ MICRO_BATCH_SIZE=2
49
+ GRADIENT_ACCUMULATION_STEPS=1
50
+ WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
51
+ GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))
52
+
53
+ # Model parameters
54
+ source model_params.sh
55
+ MODEL_PARAM=("${PARAM_4516M[@]}")
56
+ NHIDDEN=${MODEL_PARAM[0]}
57
+ FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
58
+ KV_SIZE=${MODEL_PARAM[2]}
59
+ NHEADS=${MODEL_PARAM[3]}
60
+ NLAYERS=${MODEL_PARAM[4]}
61
+ SEQ_LEN=2048
62
+
63
+ echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"
64
+
65
+ SAVE_INTERVAL=10000
66
+
67
+ # Tokens: 84_000_000_000
68
+ # -> Samples: 41_015_625.0
69
+ TRAIN_SAMPLES=41_015_625
70
+
71
+ OPTIMIZER_ARGS=" \
72
+ --optimizer adam \
73
+ --adam-beta1 0.9 \
74
+ --adam-beta2 0.95 \
75
+ --adam-eps 1e-8 \
76
+ --lr 2e-4 \
77
+ --min-lr 2e-5 \
78
+ --lr-decay-style cosine \
79
+ --lr-decay-samples $TRAIN_SAMPLES \
80
+ --lr-warmup-samples 410_156 \
81
+ --clip-grad 1.0 \
82
+ --weight-decay 1e-1 \
83
+ "
84
+
85
+ GPT_ARGS=" \
86
+ --num-layers $NLAYERS \
87
+ --hidden-size $NHIDDEN \
88
+ --num-attention-heads $NHEADS \
89
+ --kv-channels $KV_SIZE \
90
+ --ffn-hidden-size $FFN_HIDDEN_SIZE \
91
+ --seq-length $SEQ_LEN \
92
+ --max-position-embeddings $SEQ_LEN \
93
+ --micro-batch-size $MICRO_BATCH_SIZE \
94
+ --global-batch-size $GLOBAL_BATCH_SIZE \
95
+ --train-samples $TRAIN_SAMPLES \
96
+ --vocab-file $VOCAB_FILE \
97
+ --merge-file $MERGE_FILE \
98
+ --clip-grad 1.0 \
99
+ --kill-switch-path $KILL_SWITCH_PATH \
100
+ --bf16 \
101
+ $OPTIMIZER_ARGS \
102
+ "
103
+
104
+ OUTPUT_ARGS=" \
105
+ --log-interval 10 \
106
+ --save-interval $SAVE_INTERVAL \
107
+ --eval-interval 5000 \
108
+ --eval-iters 10 \
109
+ --tensorboard-dir $TENSORBOARD_PATH \
110
+ --tensorboard-queue-size 5 \
111
+ --log-timers-to-tensorboard \
112
+ --log-batch-size-to-tensorboard \
113
+ --log-validation-ppl-to-tensorboard \
114
+ "
115
+
116
+ ZERO_STAGE=0
117
+
118
+ mkdir -p ds_configs
119
+ DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"
120
+
121
+ cat <<EOF > $DS_CONFIG_PATH
122
+ {
123
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
124
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
125
+ "gradient_clipping": 1.0,
126
+ "zero_optimization": {
127
+ "stage": $ZERO_STAGE
128
+ },
129
+ "bf16": {
130
+ "enabled": true
131
+ },
132
+ "steps_per_print": 2000,
133
+ "wall_clock_breakdown": false
134
+ }
135
+ EOF
136
+
137
+ DEEPSPEED_ARGS=" \
138
+ --deepspeed \
139
+ --deepspeed_config $DS_CONFIG_PATH \
140
+ --zero-stage $ZERO_STAGE \
141
+ "
142
+
143
+ CMD=" \
144
+ Megatron-DeepSpeed/pretrain_gpt.py \
145
+ --tensor-model-parallel-size $TP_SIZE \
146
+ --pipeline-model-parallel-size $PP_SIZE \
147
+ $GPT_ARGS \
148
+ $OUTPUT_ARGS \
149
+ --save $CHECKPOINT_PATH \
150
+ --load $CHECKPOINT_PATH \
151
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
152
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
153
+ --data-impl mmap \
154
+ $DEEPSPEED_ARGS \
155
+ --seed 1 \
156
+ "
157
+
158
+ echo $CMD
159
+
160
+ echo "START $SLURM_JOBID: $(date)"
161
+
162
+ # bash launch_srun.sh $CMD
163
+ srun --label launch.sh $CMD
164
+
165
+ echo "END $SLURM_JOBID: $(date)"
sbatch_4b284b84b10c4pyseed2.sh ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ #SBATCH --exclude=nid007571,nid007112,nid006774,nid007502,nid007506,nid007507,nid005145,nid006692,nid007218,nid007123,nid006124,nid006123,nid007496,nid007237,nid006852,nid007206,nid006947,nid007212,nid006977,nid007222,nid005444,nid007219,nid007493,nid007221,nid005300,nid005619,nid006118,nid005203,nid006113,nid006481,nid007077,nid005208,nid005207,nid005879,nid005901
3
+ #SBATCH --nodes=32
4
+ #SBATCH --ntasks-per-node=1
5
+ #SBATCH --cpus-per-task=40
6
+ #SBATCH --mem=256G
7
+ #SBATCH -p standard-g
8
+ #SBATCH -t 48:00:00
9
+ #SBATCH --gpus-per-node=mi250:8
10
+ #SBATCH --exclusive=user
11
+ #SBATCH --hint=nomultithread
12
+ #SBATCH --account=project_462000119
13
+ #SBATCH -o logs/%j.out
14
+ #SBATCH -e logs/%j.err
15
+
16
+ VARIANT=4b284b84b10c4pyseed2
17
+
18
+ # if run without sbatch, invoke here
19
+ if [ -z $SLURM_JOB_ID ]; then
20
+ mkdir -p logs
21
+ sbatch "$0"
22
+ exit
23
+ fi
24
+
25
+ set -euo pipefail
26
+
27
+ # symlink logs/latest.out and logs/latest.err
28
+ ln -f -s $SLURM_JOB_ID.out logs/latest.out
29
+ ln -f -s $SLURM_JOB_ID.err logs/latest.err
30
+
31
+ KILL_SWITCH_PATH=kill-switch-$VARIANT
32
+ CHECKPOINT_PATH=checkpoints_$VARIANT
33
+ TENSORBOARD_PATH=tensorboard_$VARIANT
34
+
35
+ # Data
36
+ VOCAB_FILE="gpt2/vocab.json"
37
+ MERGE_FILE="gpt2/merges.txt"
38
+
39
+ TRAIN_DATA_PATH=train84b10c4py.txt
40
+ # "train: 0.1 0:1 /scratch/project_462000119/data/c4_subsampled/gpt2tok_c4_en_84B_text_document, 0.9 0:1 /scratch/project_462000119/data/python/gpt2tok_python_84B_content_document"
41
+ VALID_DATA_PATH=valc4py.txt
42
+ # "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document" "validation_python: 1.0 0.95:1 /scratch/project_462000119/data/python/gpt2tok_python_content_document"
43
+
44
+
45
+ PP_SIZE=1
46
+ TP_SIZE=2
47
+
48
+ MICRO_BATCH_SIZE=2
49
+ GRADIENT_ACCUMULATION_STEPS=1
50
+ WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
51
+ GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))
52
+
53
+ # Model parameters
54
+ source model_params.sh
55
+ MODEL_PARAM=("${PARAM_4516M[@]}")
56
+ NHIDDEN=${MODEL_PARAM[0]}
57
+ FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
58
+ KV_SIZE=${MODEL_PARAM[2]}
59
+ NHEADS=${MODEL_PARAM[3]}
60
+ NLAYERS=${MODEL_PARAM[4]}
61
+ SEQ_LEN=2048
62
+
63
+ echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"
64
+
65
+ SAVE_INTERVAL=10000
66
+
67
+ # Tokens: 84_000_000_000
68
+ # -> Samples: 41_015_625.0
69
+ TRAIN_SAMPLES=41_015_625
70
+
71
+ OPTIMIZER_ARGS=" \
72
+ --optimizer adam \
73
+ --adam-beta1 0.9 \
74
+ --adam-beta2 0.95 \
75
+ --adam-eps 1e-8 \
76
+ --lr 2e-4 \
77
+ --min-lr 2e-5 \
78
+ --lr-decay-style cosine \
79
+ --lr-decay-samples $TRAIN_SAMPLES \
80
+ --lr-warmup-samples 410_156 \
81
+ --clip-grad 1.0 \
82
+ --weight-decay 1e-1 \
83
+ "
84
+
85
+ GPT_ARGS=" \
86
+ --num-layers $NLAYERS \
87
+ --hidden-size $NHIDDEN \
88
+ --num-attention-heads $NHEADS \
89
+ --kv-channels $KV_SIZE \
90
+ --ffn-hidden-size $FFN_HIDDEN_SIZE \
91
+ --seq-length $SEQ_LEN \
92
+ --max-position-embeddings $SEQ_LEN \
93
+ --micro-batch-size $MICRO_BATCH_SIZE \
94
+ --global-batch-size $GLOBAL_BATCH_SIZE \
95
+ --train-samples $TRAIN_SAMPLES \
96
+ --vocab-file $VOCAB_FILE \
97
+ --merge-file $MERGE_FILE \
98
+ --clip-grad 1.0 \
99
+ --kill-switch-path $KILL_SWITCH_PATH \
100
+ --bf16 \
101
+ $OPTIMIZER_ARGS \
102
+ "
103
+
104
+ OUTPUT_ARGS=" \
105
+ --log-interval 10 \
106
+ --save-interval $SAVE_INTERVAL \
107
+ --eval-interval 5000 \
108
+ --eval-iters 10 \
109
+ --tensorboard-dir $TENSORBOARD_PATH \
110
+ --tensorboard-queue-size 5 \
111
+ --log-timers-to-tensorboard \
112
+ --log-batch-size-to-tensorboard \
113
+ --log-validation-ppl-to-tensorboard \
114
+ "
115
+
116
+ ZERO_STAGE=0
117
+
118
+ mkdir -p ds_configs
119
+ DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"
120
+
121
+ cat <<EOF > $DS_CONFIG_PATH
122
+ {
123
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
124
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
125
+ "gradient_clipping": 1.0,
126
+ "zero_optimization": {
127
+ "stage": $ZERO_STAGE
128
+ },
129
+ "bf16": {
130
+ "enabled": true
131
+ },
132
+ "steps_per_print": 2000,
133
+ "wall_clock_breakdown": false
134
+ }
135
+ EOF
136
+
137
+ DEEPSPEED_ARGS=" \
138
+ --deepspeed \
139
+ --deepspeed_config $DS_CONFIG_PATH \
140
+ --zero-stage $ZERO_STAGE \
141
+ "
142
+
143
+ CMD=" \
144
+ Megatron-DeepSpeed/pretrain_gpt.py \
145
+ --tensor-model-parallel-size $TP_SIZE \
146
+ --pipeline-model-parallel-size $PP_SIZE \
147
+ $GPT_ARGS \
148
+ $OUTPUT_ARGS \
149
+ --save $CHECKPOINT_PATH \
150
+ --load $CHECKPOINT_PATH \
151
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
152
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
153
+ --data-impl mmap \
154
+ $DEEPSPEED_ARGS \
155
+ --seed 2 \
156
+ "
157
+
158
+ echo $CMD
159
+
160
+ echo "START $SLURM_JOBID: $(date)"
161
+
162
+ # bash launch_srun.sh $CMD
163
+ srun --label launch.sh $CMD
164
+
165
+ echo "END $SLURM_JOBID: $(date)"
sbatch_4b284b84b10c4pyseed3.sh ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ #SBATCH --exclude=nid007571,nid007112,nid006774,nid007502,nid007506,nid007507,nid005145,nid006692,nid007218,nid007123,nid006124,nid006123,nid007496,nid007237,nid006852,nid007206,nid006947,nid007212,nid006977,nid007222,nid005444,nid007219,nid007493,nid007221,nid005300,nid005619,nid006118,nid005203,nid006113,nid006481,nid007077,nid005208,nid005207,nid005879,nid005901
3
+ #SBATCH --nodes=32
4
+ #SBATCH --ntasks-per-node=1
5
+ #SBATCH --cpus-per-task=40
6
+ #SBATCH --mem=256G
7
+ #SBATCH -p standard-g
8
+ #SBATCH -t 48:00:00
9
+ #SBATCH --gpus-per-node=mi250:8
10
+ #SBATCH --exclusive=user
11
+ #SBATCH --hint=nomultithread
12
+ #SBATCH --account=project_462000119
13
+ #SBATCH -o logs/%j.out
14
+ #SBATCH -e logs/%j.err
15
+
16
+ VARIANT=4b284b84b10c4pyseed3
17
+
18
+ # if run without sbatch, invoke here
19
+ if [ -z $SLURM_JOB_ID ]; then
20
+ mkdir -p logs
21
+ sbatch "$0"
22
+ exit
23
+ fi
24
+
25
+ set -euo pipefail
26
+
27
+ # symlink logs/latest.out and logs/latest.err
28
+ ln -f -s $SLURM_JOB_ID.out logs/latest.out
29
+ ln -f -s $SLURM_JOB_ID.err logs/latest.err
30
+
31
+ KILL_SWITCH_PATH=kill-switch-$VARIANT
32
+ CHECKPOINT_PATH=checkpoints_$VARIANT
33
+ TENSORBOARD_PATH=tensorboard_$VARIANT
34
+
35
+ # Data
36
+ VOCAB_FILE="gpt2/vocab.json"
37
+ MERGE_FILE="gpt2/merges.txt"
38
+
39
+ TRAIN_DATA_PATH=train84b10c4py.txt
40
+ # "train: 0.1 0:1 /scratch/project_462000119/data/c4_subsampled/gpt2tok_c4_en_84B_text_document, 0.9 0:1 /scratch/project_462000119/data/python/gpt2tok_python_84B_content_document"
41
+ VALID_DATA_PATH=valc4py.txt
42
+ # "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document" "validation_python: 1.0 0.95:1 /scratch/project_462000119/data/python/gpt2tok_python_content_document"
43
+
44
+
45
+ PP_SIZE=1
46
+ TP_SIZE=2
47
+
48
+ MICRO_BATCH_SIZE=2
49
+ GRADIENT_ACCUMULATION_STEPS=1
50
+ WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
51
+ GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))
52
+
53
+ # Model parameters
54
+ source model_params.sh
55
+ MODEL_PARAM=("${PARAM_4516M[@]}")
56
+ NHIDDEN=${MODEL_PARAM[0]}
57
+ FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
58
+ KV_SIZE=${MODEL_PARAM[2]}
59
+ NHEADS=${MODEL_PARAM[3]}
60
+ NLAYERS=${MODEL_PARAM[4]}
61
+ SEQ_LEN=2048
62
+
63
+ echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"
64
+
65
+ SAVE_INTERVAL=10000
66
+
67
+ # Tokens: 84_000_000_000
68
+ # -> Samples: 41_015_625.0
69
+ TRAIN_SAMPLES=41_015_625
70
+
71
+ OPTIMIZER_ARGS=" \
72
+ --optimizer adam \
73
+ --adam-beta1 0.9 \
74
+ --adam-beta2 0.95 \
75
+ --adam-eps 1e-8 \
76
+ --lr 2e-4 \
77
+ --min-lr 2e-5 \
78
+ --lr-decay-style cosine \
79
+ --lr-decay-samples $TRAIN_SAMPLES \
80
+ --lr-warmup-samples 410_156 \
81
+ --clip-grad 1.0 \
82
+ --weight-decay 1e-1 \
83
+ "
84
+
85
+ GPT_ARGS=" \
86
+ --num-layers $NLAYERS \
87
+ --hidden-size $NHIDDEN \
88
+ --num-attention-heads $NHEADS \
89
+ --kv-channels $KV_SIZE \
90
+ --ffn-hidden-size $FFN_HIDDEN_SIZE \
91
+ --seq-length $SEQ_LEN \
92
+ --max-position-embeddings $SEQ_LEN \
93
+ --micro-batch-size $MICRO_BATCH_SIZE \
94
+ --global-batch-size $GLOBAL_BATCH_SIZE \
95
+ --train-samples $TRAIN_SAMPLES \
96
+ --vocab-file $VOCAB_FILE \
97
+ --merge-file $MERGE_FILE \
98
+ --clip-grad 1.0 \
99
+ --kill-switch-path $KILL_SWITCH_PATH \
100
+ --bf16 \
101
+ $OPTIMIZER_ARGS \
102
+ "
103
+
104
+ OUTPUT_ARGS=" \
105
+ --log-interval 10 \
106
+ --save-interval $SAVE_INTERVAL \
107
+ --eval-interval 5000 \
108
+ --eval-iters 10 \
109
+ --tensorboard-dir $TENSORBOARD_PATH \
110
+ --tensorboard-queue-size 5 \
111
+ --log-timers-to-tensorboard \
112
+ --log-batch-size-to-tensorboard \
113
+ --log-validation-ppl-to-tensorboard \
114
+ "
115
+
116
+ ZERO_STAGE=0
117
+
118
+ mkdir -p ds_configs
119
+ DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"
120
+
121
+ cat <<EOF > $DS_CONFIG_PATH
122
+ {
123
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
124
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
125
+ "gradient_clipping": 1.0,
126
+ "zero_optimization": {
127
+ "stage": $ZERO_STAGE
128
+ },
129
+ "bf16": {
130
+ "enabled": true
131
+ },
132
+ "steps_per_print": 2000,
133
+ "wall_clock_breakdown": false
134
+ }
135
+ EOF
136
+
137
+ DEEPSPEED_ARGS=" \
138
+ --deepspeed \
139
+ --deepspeed_config $DS_CONFIG_PATH \
140
+ --zero-stage $ZERO_STAGE \
141
+ "
142
+
143
+ CMD=" \
144
+ Megatron-DeepSpeed/pretrain_gpt.py \
145
+ --tensor-model-parallel-size $TP_SIZE \
146
+ --pipeline-model-parallel-size $PP_SIZE \
147
+ $GPT_ARGS \
148
+ $OUTPUT_ARGS \
149
+ --save $CHECKPOINT_PATH \
150
+ --load $CHECKPOINT_PATH \
151
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
152
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
153
+ --data-impl mmap \
154
+ $DEEPSPEED_ARGS \
155
+ --seed 3 \
156
+ "
157
+
158
+ echo $CMD
159
+
160
+ echo "START $SLURM_JOBID: $(date)"
161
+
162
+ # bash launch_srun.sh $CMD
163
+ srun --label launch.sh $CMD
164
+
165
+ echo "END $SLURM_JOBID: $(date)"
sbatch_4b284b84b10c4pyseed4.sh ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ #SBATCH --exclude=nid007571,nid007112,nid006774,nid007502,nid007506,nid007507,nid005145,nid006692,nid007218,nid007123,nid006124,nid006123,nid007496,nid007237,nid006852,nid007206,nid006947,nid007212,nid006977,nid007222,nid005444,nid007219,nid007493,nid007221,nid005300,nid005619,nid006118,nid005203,nid006113,nid006481,nid007077,nid005208,nid005207,nid005879,nid005901
3
+ #SBATCH --nodes=32
4
+ #SBATCH --ntasks-per-node=1
5
+ #SBATCH --cpus-per-task=40
6
+ #SBATCH --mem=256G
7
+ #SBATCH -p standard-g
8
+ #SBATCH -t 48:00:00
9
+ #SBATCH --gpus-per-node=mi250:8
10
+ #SBATCH --exclusive=user
11
+ #SBATCH --hint=nomultithread
12
+ #SBATCH --account=project_462000119
13
+ #SBATCH -o logs/%j.out
14
+ #SBATCH -e logs/%j.err
15
+
16
+ VARIANT=4b284b84b10c4pyseed4
17
+
18
+ # if run without sbatch, invoke here
19
+ if [ -z $SLURM_JOB_ID ]; then
20
+ mkdir -p logs
21
+ sbatch "$0"
22
+ exit
23
+ fi
24
+
25
+ set -euo pipefail
26
+
27
+ # symlink logs/latest.out and logs/latest.err
28
+ ln -f -s $SLURM_JOB_ID.out logs/latest.out
29
+ ln -f -s $SLURM_JOB_ID.err logs/latest.err
30
+
31
+ KILL_SWITCH_PATH=kill-switch-$VARIANT
32
+ CHECKPOINT_PATH=checkpoints_$VARIANT
33
+ TENSORBOARD_PATH=tensorboard_$VARIANT
34
+
35
+ # Data
36
+ VOCAB_FILE="gpt2/vocab.json"
37
+ MERGE_FILE="gpt2/merges.txt"
38
+
39
+ TRAIN_DATA_PATH=train84b10c4py.txt
40
+ # "train: 0.1 0:1 /scratch/project_462000119/data/c4_subsampled/gpt2tok_c4_en_84B_text_document, 0.9 0:1 /scratch/project_462000119/data/python/gpt2tok_python_84B_content_document"
41
+ VALID_DATA_PATH=valc4py.txt
42
+ # "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document" "validation_python: 1.0 0.95:1 /scratch/project_462000119/data/python/gpt2tok_python_content_document"
43
+
44
+
45
+ PP_SIZE=1
46
+ TP_SIZE=2
47
+
48
+ MICRO_BATCH_SIZE=2
49
+ GRADIENT_ACCUMULATION_STEPS=1
50
+ WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
51
+ GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))
52
+
53
+ # Model parameters
54
+ source model_params.sh
55
+ MODEL_PARAM=("${PARAM_4516M[@]}")
56
+ NHIDDEN=${MODEL_PARAM[0]}
57
+ FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
58
+ KV_SIZE=${MODEL_PARAM[2]}
59
+ NHEADS=${MODEL_PARAM[3]}
60
+ NLAYERS=${MODEL_PARAM[4]}
61
+ SEQ_LEN=2048
62
+
63
+ echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"
64
+
65
+ SAVE_INTERVAL=10000
66
+
67
+ # Tokens: 84_000_000_000
68
+ # -> Samples: 41_015_625.0
69
+ TRAIN_SAMPLES=41_015_625
70
+
71
+ OPTIMIZER_ARGS=" \
72
+ --optimizer adam \
73
+ --adam-beta1 0.9 \
74
+ --adam-beta2 0.95 \
75
+ --adam-eps 1e-8 \
76
+ --lr 2e-4 \
77
+ --min-lr 2e-5 \
78
+ --lr-decay-style cosine \
79
+ --lr-decay-samples $TRAIN_SAMPLES \
80
+ --lr-warmup-samples 410_156 \
81
+ --clip-grad 1.0 \
82
+ --weight-decay 1e-1 \
83
+ "
84
+
85
+ GPT_ARGS=" \
86
+ --num-layers $NLAYERS \
87
+ --hidden-size $NHIDDEN \
88
+ --num-attention-heads $NHEADS \
89
+ --kv-channels $KV_SIZE \
90
+ --ffn-hidden-size $FFN_HIDDEN_SIZE \
91
+ --seq-length $SEQ_LEN \
92
+ --max-position-embeddings $SEQ_LEN \
93
+ --micro-batch-size $MICRO_BATCH_SIZE \
94
+ --global-batch-size $GLOBAL_BATCH_SIZE \
95
+ --train-samples $TRAIN_SAMPLES \
96
+ --vocab-file $VOCAB_FILE \
97
+ --merge-file $MERGE_FILE \
98
+ --clip-grad 1.0 \
99
+ --kill-switch-path $KILL_SWITCH_PATH \
100
+ --bf16 \
101
+ $OPTIMIZER_ARGS \
102
+ "
103
+
104
+ OUTPUT_ARGS=" \
105
+ --log-interval 10 \
106
+ --save-interval $SAVE_INTERVAL \
107
+ --eval-interval 5000 \
108
+ --eval-iters 10 \
109
+ --tensorboard-dir $TENSORBOARD_PATH \
110
+ --tensorboard-queue-size 5 \
111
+ --log-timers-to-tensorboard \
112
+ --log-batch-size-to-tensorboard \
113
+ --log-validation-ppl-to-tensorboard \
114
+ "
115
+
116
+ ZERO_STAGE=0
117
+
118
+ mkdir -p ds_configs
119
+ DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"
120
+
121
+ cat <<EOF > $DS_CONFIG_PATH
122
+ {
123
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
124
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
125
+ "gradient_clipping": 1.0,
126
+ "zero_optimization": {
127
+ "stage": $ZERO_STAGE
128
+ },
129
+ "bf16": {
130
+ "enabled": true
131
+ },
132
+ "steps_per_print": 2000,
133
+ "wall_clock_breakdown": false
134
+ }
135
+ EOF
136
+
137
+ DEEPSPEED_ARGS=" \
138
+ --deepspeed \
139
+ --deepspeed_config $DS_CONFIG_PATH \
140
+ --zero-stage $ZERO_STAGE \
141
+ "
142
+
143
+ CMD=" \
144
+ Megatron-DeepSpeed/pretrain_gpt.py \
145
+ --tensor-model-parallel-size $TP_SIZE \
146
+ --pipeline-model-parallel-size $PP_SIZE \
147
+ $GPT_ARGS \
148
+ $OUTPUT_ARGS \
149
+ --save $CHECKPOINT_PATH \
150
+ --load $CHECKPOINT_PATH \
151
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
152
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
153
+ --data-impl mmap \
154
+ $DEEPSPEED_ARGS \
155
+ --seed 4 \
156
+ "
157
+
158
+ echo $CMD
159
+
160
+ echo "START $SLURM_JOBID: $(date)"
161
+
162
+ # bash launch_srun.sh $CMD
163
+ srun --label launch.sh $CMD
164
+
165
+ echo "END $SLURM_JOBID: $(date)"
sbatch_4b284b84b20c4pyseed1.sh ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ #SBATCH --exclude=nid007571,nid007112,nid006774,nid007502,nid007506,nid007507,nid005145,nid006692,nid007218,nid007123,nid006124,nid006123,nid007496,nid007237,nid006852,nid007206,nid006947,nid007212,nid006977,nid007222,nid005444,nid007219,nid007493,nid007221,nid005300,nid005619,nid006118,nid005203,nid006113,nid006481,nid007077,nid005208,nid005207,nid005879,nid005901
3
+ #SBATCH --nodes=32
4
+ #SBATCH --ntasks-per-node=1
5
+ #SBATCH --cpus-per-task=40
6
+ #SBATCH --mem=256G
7
+ #SBATCH -p standard-g
8
+ #SBATCH -t 48:00:00
9
+ #SBATCH --gpus-per-node=mi250:8
10
+ #SBATCH --exclusive=user
11
+ #SBATCH --hint=nomultithread
12
+ #SBATCH --account=project_462000119
13
+ #SBATCH -o logs/%j.out
14
+ #SBATCH -e logs/%j.err
15
+
16
+ VARIANT=4b284b84b20c4pyseed1
17
+
18
+ # if run without sbatch, invoke here
19
+ if [ -z $SLURM_JOB_ID ]; then
20
+ mkdir -p logs
21
+ sbatch "$0"
22
+ exit
23
+ fi
24
+
25
+ set -euo pipefail
26
+
27
+ # symlink logs/latest.out and logs/latest.err
28
+ ln -f -s $SLURM_JOB_ID.out logs/latest.out
29
+ ln -f -s $SLURM_JOB_ID.err logs/latest.err
30
+
31
+ KILL_SWITCH_PATH=kill-switch-$VARIANT
32
+ CHECKPOINT_PATH=checkpoints_$VARIANT
33
+ TENSORBOARD_PATH=tensorboard_$VARIANT
34
+
35
+ # Data
36
+ VOCAB_FILE="gpt2/vocab.json"
37
+ MERGE_FILE="gpt2/merges.txt"
38
+
39
+ TRAIN_DATA_PATH=train84b20c4py.txt
40
+ # "train: 0.2 0:1 /scratch/project_462000119/data/c4_subsampled/gpt2tok_c4_en_84B_text_document, 0.8 0:1 /scratch/project_462000119/data/python/gpt2tok_python_84B_content_document"
41
+ VALID_DATA_PATH=valc4py.txt
42
+ # "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document" "validation_python: 1.0 0.95:1 /scratch/project_462000119/data/python/gpt2tok_python_content_document"
43
+
44
+
45
+ PP_SIZE=1
46
+ TP_SIZE=2
47
+
48
+ MICRO_BATCH_SIZE=2
49
+ GRADIENT_ACCUMULATION_STEPS=1
50
+ WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
51
+ GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))
52
+
53
+ # Model parameters
54
+ source model_params.sh
55
+ MODEL_PARAM=("${PARAM_4516M[@]}")
56
+ NHIDDEN=${MODEL_PARAM[0]}
57
+ FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
58
+ KV_SIZE=${MODEL_PARAM[2]}
59
+ NHEADS=${MODEL_PARAM[3]}
60
+ NLAYERS=${MODEL_PARAM[4]}
61
+ SEQ_LEN=2048
62
+
63
+ echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"
64
+
65
+ SAVE_INTERVAL=10000
66
+
67
+ # Tokens: 84_000_000_000
68
+ # -> Samples: 41_015_625.0
69
+ TRAIN_SAMPLES=41_015_625
70
+
71
+ OPTIMIZER_ARGS=" \
72
+ --optimizer adam \
73
+ --adam-beta1 0.9 \
74
+ --adam-beta2 0.95 \
75
+ --adam-eps 1e-8 \
76
+ --lr 2e-4 \
77
+ --min-lr 2e-5 \
78
+ --lr-decay-style cosine \
79
+ --lr-decay-samples $TRAIN_SAMPLES \
80
+ --lr-warmup-samples 410_156 \
81
+ --clip-grad 1.0 \
82
+ --weight-decay 1e-1 \
83
+ "
84
+
85
+ GPT_ARGS=" \
86
+ --num-layers $NLAYERS \
87
+ --hidden-size $NHIDDEN \
88
+ --num-attention-heads $NHEADS \
89
+ --kv-channels $KV_SIZE \
90
+ --ffn-hidden-size $FFN_HIDDEN_SIZE \
91
+ --seq-length $SEQ_LEN \
92
+ --max-position-embeddings $SEQ_LEN \
93
+ --micro-batch-size $MICRO_BATCH_SIZE \
94
+ --global-batch-size $GLOBAL_BATCH_SIZE \
95
+ --train-samples $TRAIN_SAMPLES \
96
+ --vocab-file $VOCAB_FILE \
97
+ --merge-file $MERGE_FILE \
98
+ --clip-grad 1.0 \
99
+ --kill-switch-path $KILL_SWITCH_PATH \
100
+ --bf16 \
101
+ $OPTIMIZER_ARGS \
102
+ "
103
+
104
+ OUTPUT_ARGS=" \
105
+ --log-interval 10 \
106
+ --save-interval $SAVE_INTERVAL \
107
+ --eval-interval 5000 \
108
+ --eval-iters 10 \
109
+ --tensorboard-dir $TENSORBOARD_PATH \
110
+ --tensorboard-queue-size 5 \
111
+ --log-timers-to-tensorboard \
112
+ --log-batch-size-to-tensorboard \
113
+ --log-validation-ppl-to-tensorboard \
114
+ "
115
+
116
+ ZERO_STAGE=0
117
+
118
+ mkdir -p ds_configs
119
+ DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"
120
+
121
+ cat <<EOF > $DS_CONFIG_PATH
122
+ {
123
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
124
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
125
+ "gradient_clipping": 1.0,
126
+ "zero_optimization": {
127
+ "stage": $ZERO_STAGE
128
+ },
129
+ "bf16": {
130
+ "enabled": true
131
+ },
132
+ "steps_per_print": 2000,
133
+ "wall_clock_breakdown": false
134
+ }
135
+ EOF
136
+
137
+ DEEPSPEED_ARGS=" \
138
+ --deepspeed \
139
+ --deepspeed_config $DS_CONFIG_PATH \
140
+ --zero-stage $ZERO_STAGE \
141
+ "
142
+
143
+ CMD=" \
144
+ Megatron-DeepSpeed/pretrain_gpt.py \
145
+ --tensor-model-parallel-size $TP_SIZE \
146
+ --pipeline-model-parallel-size $PP_SIZE \
147
+ $GPT_ARGS \
148
+ $OUTPUT_ARGS \
149
+ --save $CHECKPOINT_PATH \
150
+ --load $CHECKPOINT_PATH \
151
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
152
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
153
+ --data-impl mmap \
154
+ $DEEPSPEED_ARGS \
155
+ --seed 1 \
156
+ "
157
+
158
+ echo $CMD
159
+
160
+ echo "START $SLURM_JOBID: $(date)"
161
+
162
+ # bash launch_srun.sh $CMD
163
+ srun --label launch.sh $CMD
164
+
165
+ echo "END $SLURM_JOBID: $(date)"
sbatch_4b284b84b20c4pyseed2.sh ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
#SBATCH --exclude=nid007571,nid007112,nid006774,nid007502,nid007506,nid007507,nid005145,nid006692,nid007218,nid007123,nid006124,nid006123,nid007496,nid007237,nid006852,nid007206,nid006947,nid007212,nid006977,nid007222,nid005444,nid007219,nid007493,nid007221,nid005300,nid005619,nid006118,nid005203,nid006113,nid006481,nid007077,nid005208,nid005207,nid005879,nid005901
#SBATCH --nodes=32
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=40
#SBATCH --mem=256G
#SBATCH -p standard-g
#SBATCH -t 48:00:00
#SBATCH --gpus-per-node=mi250:8
#SBATCH --exclusive=user
#SBATCH --hint=nomultithread
#SBATCH --account=project_462000119
#SBATCH -o logs/%j.out
#SBATCH -e logs/%j.err

# Pretraining run: 84B tokens, 20% C4 / 80% Python data mix, seed 2.
VARIANT=4b284b84b20c4pyseed2

# If run without sbatch, submit ourselves (sbatch sets SLURM_JOB_ID).
# "${VAR:-}" keeps the test well-formed even when the variable is unset;
# the original unquoted test only worked by accident of one-arg `test`.
if [ -z "${SLURM_JOB_ID:-}" ]; then
    mkdir -p logs
    sbatch "$0"
    exit
fi

set -euo pipefail

# symlink logs/latest.out and logs/latest.err to this job's output
ln -f -s "$SLURM_JOB_ID.out" logs/latest.out
ln -f -s "$SLURM_JOB_ID.err" logs/latest.err

# Creating this file signals the training loop to checkpoint and exit
# cleanly (presumably — semantics live in Megatron-DeepSpeed; verify there).
KILL_SWITCH_PATH=kill-switch-$VARIANT
CHECKPOINT_PATH=checkpoints_$VARIANT
TENSORBOARD_PATH=tensorboard_$VARIANT

# Data
VOCAB_FILE="gpt2/vocab.json"
MERGE_FILE="gpt2/merges.txt"

TRAIN_DATA_PATH=train84b20c4py.txt
# "train: 0.2 0:1 /scratch/project_462000119/data/c4_subsampled/gpt2tok_c4_en_84B_text_document, 0.8 0:1 /scratch/project_462000119/data/python/gpt2tok_python_84B_content_document"
VALID_DATA_PATH=valc4py.txt
# "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document" "validation_python: 1.0 0.95:1 /scratch/project_462000119/data/python/gpt2tok_python_content_document"

# Parallelism: tensor-parallel 2, no pipeline parallelism.
PP_SIZE=1
TP_SIZE=2

MICRO_BATCH_SIZE=2
GRADIENT_ACCUMULATION_STEPS=1
WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))

# Model parameters (PARAM_4516M array is defined in model_params.sh:
# hidden size, FFN size, KV channels, heads, layers — in that order).
source model_params.sh
MODEL_PARAM=("${PARAM_4516M[@]}")
NHIDDEN=${MODEL_PARAM[0]}
FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
KV_SIZE=${MODEL_PARAM[2]}
NHEADS=${MODEL_PARAM[3]}
NLAYERS=${MODEL_PARAM[4]}
SEQ_LEN=2048

echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"

SAVE_INTERVAL=10000

# Tokens: 84_000_000_000
# -> Samples: 41_015_625.0  (84e9 tokens / 2048 tokens per sample)
TRAIN_SAMPLES=41_015_625

OPTIMIZER_ARGS=" \
    --optimizer adam \
    --adam-beta1 0.9 \
    --adam-beta2 0.95 \
    --adam-eps 1e-8 \
    --lr 2e-4 \
    --min-lr 2e-5 \
    --lr-decay-style cosine \
    --lr-decay-samples $TRAIN_SAMPLES \
    --lr-warmup-samples 410_156 \
    --clip-grad 1.0 \
    --weight-decay 1e-1 \
    "

# NOTE(review): --clip-grad appears both here and in OPTIMIZER_ARGS; argparse
# keeps the last occurrence, so this is redundant but harmless.
GPT_ARGS=" \
    --num-layers $NLAYERS \
    --hidden-size $NHIDDEN \
    --num-attention-heads $NHEADS \
    --kv-channels $KV_SIZE \
    --ffn-hidden-size $FFN_HIDDEN_SIZE \
    --seq-length $SEQ_LEN \
    --max-position-embeddings $SEQ_LEN \
    --micro-batch-size $MICRO_BATCH_SIZE \
    --global-batch-size $GLOBAL_BATCH_SIZE \
    --train-samples $TRAIN_SAMPLES \
    --vocab-file $VOCAB_FILE \
    --merge-file $MERGE_FILE \
    --clip-grad 1.0 \
    --kill-switch-path $KILL_SWITCH_PATH \
    --bf16 \
    $OPTIMIZER_ARGS \
    "

OUTPUT_ARGS=" \
    --log-interval 10 \
    --save-interval $SAVE_INTERVAL \
    --eval-interval 5000 \
    --eval-iters 10 \
    --tensorboard-dir $TENSORBOARD_PATH \
    --tensorboard-queue-size 5 \
    --log-timers-to-tensorboard \
    --log-batch-size-to-tensorboard \
    --log-validation-ppl-to-tensorboard \
    "

ZERO_STAGE=0

mkdir -p ds_configs
DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"

# DeepSpeed config must agree with the Megatron batch-size arguments above.
cat <<EOF > "$DS_CONFIG_PATH"
{
    "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
    "train_batch_size": $GLOBAL_BATCH_SIZE,
    "gradient_clipping": 1.0,
    "zero_optimization": {
        "stage": $ZERO_STAGE
    },
    "bf16": {
        "enabled": true
    },
    "steps_per_print": 2000,
    "wall_clock_breakdown": false
}
EOF

DEEPSPEED_ARGS=" \
    --deepspeed \
    --deepspeed_config $DS_CONFIG_PATH \
    --zero-stage $ZERO_STAGE \
    "

CMD=" \
    Megatron-DeepSpeed/pretrain_gpt.py \
    --tensor-model-parallel-size $TP_SIZE \
    --pipeline-model-parallel-size $PP_SIZE \
    $GPT_ARGS \
    $OUTPUT_ARGS \
    --save $CHECKPOINT_PATH \
    --load $CHECKPOINT_PATH \
    --train-weighted-split-paths-path $TRAIN_DATA_PATH \
    --valid-weighted-split-paths-path $VALID_DATA_PATH \
    --data-impl mmap \
    $DEEPSPEED_ARGS \
    --seed 2 \
    "

echo "$CMD"

echo "START $SLURM_JOBID: $(date)"

# bash launch_srun.sh $CMD
# CMD is intentionally unquoted so it word-splits into separate arguments.
# shellcheck disable=SC2086
srun --label launch.sh $CMD

echo "END $SLURM_JOBID: $(date)"
sbatch_4b284b84b20c4pyseed3.sh ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
#SBATCH --exclude=nid007571,nid007112,nid006774,nid007502,nid007506,nid007507,nid005145,nid006692,nid007218,nid007123,nid006124,nid006123,nid007496,nid007237,nid006852,nid007206,nid006947,nid007212,nid006977,nid007222,nid005444,nid007219,nid007493,nid007221,nid005300,nid005619,nid006118,nid005203,nid006113,nid006481,nid007077,nid005208,nid005207,nid005879,nid005901
#SBATCH --nodes=32
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=40
#SBATCH --mem=256G
#SBATCH -p standard-g
#SBATCH -t 48:00:00
#SBATCH --gpus-per-node=mi250:8
#SBATCH --exclusive=user
#SBATCH --hint=nomultithread
#SBATCH --account=project_462000119
#SBATCH -o logs/%j.out
#SBATCH -e logs/%j.err

# Pretraining run: 84B tokens, 20% C4 / 80% Python data mix, seed 3.
VARIANT=4b284b84b20c4pyseed3

# If run without sbatch, submit ourselves (sbatch sets SLURM_JOB_ID).
# "${VAR:-}" keeps the test well-formed even when the variable is unset;
# the original unquoted test only worked by accident of one-arg `test`.
if [ -z "${SLURM_JOB_ID:-}" ]; then
    mkdir -p logs
    sbatch "$0"
    exit
fi

set -euo pipefail

# symlink logs/latest.out and logs/latest.err to this job's output
ln -f -s "$SLURM_JOB_ID.out" logs/latest.out
ln -f -s "$SLURM_JOB_ID.err" logs/latest.err

# Creating this file signals the training loop to checkpoint and exit
# cleanly (presumably — semantics live in Megatron-DeepSpeed; verify there).
KILL_SWITCH_PATH=kill-switch-$VARIANT
CHECKPOINT_PATH=checkpoints_$VARIANT
TENSORBOARD_PATH=tensorboard_$VARIANT

# Data
VOCAB_FILE="gpt2/vocab.json"
MERGE_FILE="gpt2/merges.txt"

TRAIN_DATA_PATH=train84b20c4py.txt
# "train: 0.2 0:1 /scratch/project_462000119/data/c4_subsampled/gpt2tok_c4_en_84B_text_document, 0.8 0:1 /scratch/project_462000119/data/python/gpt2tok_python_84B_content_document"
VALID_DATA_PATH=valc4py.txt
# "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document" "validation_python: 1.0 0.95:1 /scratch/project_462000119/data/python/gpt2tok_python_content_document"

# Parallelism: tensor-parallel 2, no pipeline parallelism.
PP_SIZE=1
TP_SIZE=2

MICRO_BATCH_SIZE=2
GRADIENT_ACCUMULATION_STEPS=1
WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))

# Model parameters (PARAM_4516M array is defined in model_params.sh:
# hidden size, FFN size, KV channels, heads, layers — in that order).
source model_params.sh
MODEL_PARAM=("${PARAM_4516M[@]}")
NHIDDEN=${MODEL_PARAM[0]}
FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
KV_SIZE=${MODEL_PARAM[2]}
NHEADS=${MODEL_PARAM[3]}
NLAYERS=${MODEL_PARAM[4]}
SEQ_LEN=2048

echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"

SAVE_INTERVAL=10000

# Tokens: 84_000_000_000
# -> Samples: 41_015_625.0  (84e9 tokens / 2048 tokens per sample)
TRAIN_SAMPLES=41_015_625

OPTIMIZER_ARGS=" \
    --optimizer adam \
    --adam-beta1 0.9 \
    --adam-beta2 0.95 \
    --adam-eps 1e-8 \
    --lr 2e-4 \
    --min-lr 2e-5 \
    --lr-decay-style cosine \
    --lr-decay-samples $TRAIN_SAMPLES \
    --lr-warmup-samples 410_156 \
    --clip-grad 1.0 \
    --weight-decay 1e-1 \
    "

# NOTE(review): --clip-grad appears both here and in OPTIMIZER_ARGS; argparse
# keeps the last occurrence, so this is redundant but harmless.
GPT_ARGS=" \
    --num-layers $NLAYERS \
    --hidden-size $NHIDDEN \
    --num-attention-heads $NHEADS \
    --kv-channels $KV_SIZE \
    --ffn-hidden-size $FFN_HIDDEN_SIZE \
    --seq-length $SEQ_LEN \
    --max-position-embeddings $SEQ_LEN \
    --micro-batch-size $MICRO_BATCH_SIZE \
    --global-batch-size $GLOBAL_BATCH_SIZE \
    --train-samples $TRAIN_SAMPLES \
    --vocab-file $VOCAB_FILE \
    --merge-file $MERGE_FILE \
    --clip-grad 1.0 \
    --kill-switch-path $KILL_SWITCH_PATH \
    --bf16 \
    $OPTIMIZER_ARGS \
    "

OUTPUT_ARGS=" \
    --log-interval 10 \
    --save-interval $SAVE_INTERVAL \
    --eval-interval 5000 \
    --eval-iters 10 \
    --tensorboard-dir $TENSORBOARD_PATH \
    --tensorboard-queue-size 5 \
    --log-timers-to-tensorboard \
    --log-batch-size-to-tensorboard \
    --log-validation-ppl-to-tensorboard \
    "

ZERO_STAGE=0

mkdir -p ds_configs
DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"

# DeepSpeed config must agree with the Megatron batch-size arguments above.
cat <<EOF > "$DS_CONFIG_PATH"
{
    "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
    "train_batch_size": $GLOBAL_BATCH_SIZE,
    "gradient_clipping": 1.0,
    "zero_optimization": {
        "stage": $ZERO_STAGE
    },
    "bf16": {
        "enabled": true
    },
    "steps_per_print": 2000,
    "wall_clock_breakdown": false
}
EOF

DEEPSPEED_ARGS=" \
    --deepspeed \
    --deepspeed_config $DS_CONFIG_PATH \
    --zero-stage $ZERO_STAGE \
    "

CMD=" \
    Megatron-DeepSpeed/pretrain_gpt.py \
    --tensor-model-parallel-size $TP_SIZE \
    --pipeline-model-parallel-size $PP_SIZE \
    $GPT_ARGS \
    $OUTPUT_ARGS \
    --save $CHECKPOINT_PATH \
    --load $CHECKPOINT_PATH \
    --train-weighted-split-paths-path $TRAIN_DATA_PATH \
    --valid-weighted-split-paths-path $VALID_DATA_PATH \
    --data-impl mmap \
    $DEEPSPEED_ARGS \
    --seed 3 \
    "

echo "$CMD"

echo "START $SLURM_JOBID: $(date)"

# bash launch_srun.sh $CMD
# CMD is intentionally unquoted so it word-splits into separate arguments.
# shellcheck disable=SC2086
srun --label launch.sh $CMD

echo "END $SLURM_JOBID: $(date)"
sbatch_4b284b84b20c4pyseed4.sh ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
#SBATCH --exclude=nid007571,nid007112,nid006774,nid007502,nid007506,nid007507,nid005145,nid006692,nid007218,nid007123,nid006124,nid006123,nid007496,nid007237,nid006852,nid007206,nid006947,nid007212,nid006977,nid007222,nid005444,nid007219,nid007493,nid007221,nid005300,nid005619,nid006118,nid005203,nid006113,nid006481,nid007077,nid005208,nid005207,nid005879,nid005901
#SBATCH --nodes=32
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=40
#SBATCH --mem=256G
#SBATCH -p standard-g
#SBATCH -t 48:00:00
#SBATCH --gpus-per-node=mi250:8
#SBATCH --exclusive=user
#SBATCH --hint=nomultithread
#SBATCH --account=project_462000119
#SBATCH -o logs/%j.out
#SBATCH -e logs/%j.err

# Pretraining run: 84B tokens, 20% C4 / 80% Python data mix, seed 4.
VARIANT=4b284b84b20c4pyseed4

# If run without sbatch, submit ourselves (sbatch sets SLURM_JOB_ID).
# "${VAR:-}" keeps the test well-formed even when the variable is unset;
# the original unquoted test only worked by accident of one-arg `test`.
if [ -z "${SLURM_JOB_ID:-}" ]; then
    mkdir -p logs
    sbatch "$0"
    exit
fi

set -euo pipefail

# symlink logs/latest.out and logs/latest.err to this job's output
ln -f -s "$SLURM_JOB_ID.out" logs/latest.out
ln -f -s "$SLURM_JOB_ID.err" logs/latest.err

# Creating this file signals the training loop to checkpoint and exit
# cleanly (presumably — semantics live in Megatron-DeepSpeed; verify there).
KILL_SWITCH_PATH=kill-switch-$VARIANT
CHECKPOINT_PATH=checkpoints_$VARIANT
TENSORBOARD_PATH=tensorboard_$VARIANT

# Data
VOCAB_FILE="gpt2/vocab.json"
MERGE_FILE="gpt2/merges.txt"

TRAIN_DATA_PATH=train84b20c4py.txt
# "train: 0.2 0:1 /scratch/project_462000119/data/c4_subsampled/gpt2tok_c4_en_84B_text_document, 0.8 0:1 /scratch/project_462000119/data/python/gpt2tok_python_84B_content_document"
VALID_DATA_PATH=valc4py.txt
# "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document" "validation_python: 1.0 0.95:1 /scratch/project_462000119/data/python/gpt2tok_python_content_document"

# Parallelism: tensor-parallel 2, no pipeline parallelism.
PP_SIZE=1
TP_SIZE=2

MICRO_BATCH_SIZE=2
GRADIENT_ACCUMULATION_STEPS=1
WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))

# Model parameters (PARAM_4516M array is defined in model_params.sh:
# hidden size, FFN size, KV channels, heads, layers — in that order).
source model_params.sh
MODEL_PARAM=("${PARAM_4516M[@]}")
NHIDDEN=${MODEL_PARAM[0]}
FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
KV_SIZE=${MODEL_PARAM[2]}
NHEADS=${MODEL_PARAM[3]}
NLAYERS=${MODEL_PARAM[4]}
SEQ_LEN=2048

echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"

SAVE_INTERVAL=10000

# Tokens: 84_000_000_000
# -> Samples: 41_015_625.0  (84e9 tokens / 2048 tokens per sample)
TRAIN_SAMPLES=41_015_625

OPTIMIZER_ARGS=" \
    --optimizer adam \
    --adam-beta1 0.9 \
    --adam-beta2 0.95 \
    --adam-eps 1e-8 \
    --lr 2e-4 \
    --min-lr 2e-5 \
    --lr-decay-style cosine \
    --lr-decay-samples $TRAIN_SAMPLES \
    --lr-warmup-samples 410_156 \
    --clip-grad 1.0 \
    --weight-decay 1e-1 \
    "

# NOTE(review): --clip-grad appears both here and in OPTIMIZER_ARGS; argparse
# keeps the last occurrence, so this is redundant but harmless.
GPT_ARGS=" \
    --num-layers $NLAYERS \
    --hidden-size $NHIDDEN \
    --num-attention-heads $NHEADS \
    --kv-channels $KV_SIZE \
    --ffn-hidden-size $FFN_HIDDEN_SIZE \
    --seq-length $SEQ_LEN \
    --max-position-embeddings $SEQ_LEN \
    --micro-batch-size $MICRO_BATCH_SIZE \
    --global-batch-size $GLOBAL_BATCH_SIZE \
    --train-samples $TRAIN_SAMPLES \
    --vocab-file $VOCAB_FILE \
    --merge-file $MERGE_FILE \
    --clip-grad 1.0 \
    --kill-switch-path $KILL_SWITCH_PATH \
    --bf16 \
    $OPTIMIZER_ARGS \
    "

OUTPUT_ARGS=" \
    --log-interval 10 \
    --save-interval $SAVE_INTERVAL \
    --eval-interval 5000 \
    --eval-iters 10 \
    --tensorboard-dir $TENSORBOARD_PATH \
    --tensorboard-queue-size 5 \
    --log-timers-to-tensorboard \
    --log-batch-size-to-tensorboard \
    --log-validation-ppl-to-tensorboard \
    "

ZERO_STAGE=0

mkdir -p ds_configs
DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"

# DeepSpeed config must agree with the Megatron batch-size arguments above.
cat <<EOF > "$DS_CONFIG_PATH"
{
    "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
    "train_batch_size": $GLOBAL_BATCH_SIZE,
    "gradient_clipping": 1.0,
    "zero_optimization": {
        "stage": $ZERO_STAGE
    },
    "bf16": {
        "enabled": true
    },
    "steps_per_print": 2000,
    "wall_clock_breakdown": false
}
EOF

DEEPSPEED_ARGS=" \
    --deepspeed \
    --deepspeed_config $DS_CONFIG_PATH \
    --zero-stage $ZERO_STAGE \
    "

CMD=" \
    Megatron-DeepSpeed/pretrain_gpt.py \
    --tensor-model-parallel-size $TP_SIZE \
    --pipeline-model-parallel-size $PP_SIZE \
    $GPT_ARGS \
    $OUTPUT_ARGS \
    --save $CHECKPOINT_PATH \
    --load $CHECKPOINT_PATH \
    --train-weighted-split-paths-path $TRAIN_DATA_PATH \
    --valid-weighted-split-paths-path $VALID_DATA_PATH \
    --data-impl mmap \
    $DEEPSPEED_ARGS \
    --seed 4 \
    "

echo "$CMD"

echo "START $SLURM_JOBID: $(date)"

# bash launch_srun.sh $CMD
# CMD is intentionally unquoted so it word-splits into separate arguments.
# shellcheck disable=SC2086
srun --label launch.sh $CMD

echo "END $SLURM_JOBID: $(date)"
sbatch_4b284b84b30c4pyseed1.sh ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
#SBATCH --exclude=nid007571,nid007112,nid006774,nid007502,nid007506,nid007507,nid005145,nid006692,nid007218,nid007123,nid006124,nid006123,nid007496,nid007237,nid006852,nid007206,nid006947,nid007212,nid006977,nid007222,nid005444,nid007219,nid007493,nid007221,nid005300,nid005619,nid006118,nid005203,nid006113,nid006481,nid007077,nid005208,nid005207,nid005879,nid005901
#SBATCH --nodes=32
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=40
#SBATCH --mem=256G
#SBATCH -p standard-g
#SBATCH -t 48:00:00
#SBATCH --gpus-per-node=mi250:8
#SBATCH --exclusive=user
#SBATCH --hint=nomultithread
#SBATCH --account=project_462000119
#SBATCH -o logs/%j.out
#SBATCH -e logs/%j.err

# Pretraining run: 84B tokens, 30% C4 / 70% Python data mix, seed 1.
VARIANT=4b284b84b30c4pyseed1

# If run without sbatch, submit ourselves (sbatch sets SLURM_JOB_ID).
# "${VAR:-}" keeps the test well-formed even when the variable is unset;
# the original unquoted test only worked by accident of one-arg `test`.
if [ -z "${SLURM_JOB_ID:-}" ]; then
    mkdir -p logs
    sbatch "$0"
    exit
fi

set -euo pipefail

# symlink logs/latest.out and logs/latest.err to this job's output
ln -f -s "$SLURM_JOB_ID.out" logs/latest.out
ln -f -s "$SLURM_JOB_ID.err" logs/latest.err

# Creating this file signals the training loop to checkpoint and exit
# cleanly (presumably — semantics live in Megatron-DeepSpeed; verify there).
KILL_SWITCH_PATH=kill-switch-$VARIANT
CHECKPOINT_PATH=checkpoints_$VARIANT
TENSORBOARD_PATH=tensorboard_$VARIANT

# Data
VOCAB_FILE="gpt2/vocab.json"
MERGE_FILE="gpt2/merges.txt"

TRAIN_DATA_PATH=train84b30c4py.txt
# "train: 0.3 0:1 /scratch/project_462000119/data/c4_subsampled/gpt2tok_c4_en_84B_text_document, 0.7 0:1 /scratch/project_462000119/data/python/gpt2tok_python_84B_content_document"
VALID_DATA_PATH=valc4py.txt
# "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document" "validation_python: 1.0 0.95:1 /scratch/project_462000119/data/python/gpt2tok_python_content_document"

# Parallelism: tensor-parallel 2, no pipeline parallelism.
PP_SIZE=1
TP_SIZE=2

MICRO_BATCH_SIZE=2
GRADIENT_ACCUMULATION_STEPS=1
WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))

# Model parameters (PARAM_4516M array is defined in model_params.sh:
# hidden size, FFN size, KV channels, heads, layers — in that order).
source model_params.sh
MODEL_PARAM=("${PARAM_4516M[@]}")
NHIDDEN=${MODEL_PARAM[0]}
FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
KV_SIZE=${MODEL_PARAM[2]}
NHEADS=${MODEL_PARAM[3]}
NLAYERS=${MODEL_PARAM[4]}
SEQ_LEN=2048

echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"

SAVE_INTERVAL=10000

# Tokens: 84_000_000_000
# -> Samples: 41_015_625.0  (84e9 tokens / 2048 tokens per sample)
TRAIN_SAMPLES=41_015_625

OPTIMIZER_ARGS=" \
    --optimizer adam \
    --adam-beta1 0.9 \
    --adam-beta2 0.95 \
    --adam-eps 1e-8 \
    --lr 2e-4 \
    --min-lr 2e-5 \
    --lr-decay-style cosine \
    --lr-decay-samples $TRAIN_SAMPLES \
    --lr-warmup-samples 410_156 \
    --clip-grad 1.0 \
    --weight-decay 1e-1 \
    "

# NOTE(review): --clip-grad appears both here and in OPTIMIZER_ARGS; argparse
# keeps the last occurrence, so this is redundant but harmless.
GPT_ARGS=" \
    --num-layers $NLAYERS \
    --hidden-size $NHIDDEN \
    --num-attention-heads $NHEADS \
    --kv-channels $KV_SIZE \
    --ffn-hidden-size $FFN_HIDDEN_SIZE \
    --seq-length $SEQ_LEN \
    --max-position-embeddings $SEQ_LEN \
    --micro-batch-size $MICRO_BATCH_SIZE \
    --global-batch-size $GLOBAL_BATCH_SIZE \
    --train-samples $TRAIN_SAMPLES \
    --vocab-file $VOCAB_FILE \
    --merge-file $MERGE_FILE \
    --clip-grad 1.0 \
    --kill-switch-path $KILL_SWITCH_PATH \
    --bf16 \
    $OPTIMIZER_ARGS \
    "

OUTPUT_ARGS=" \
    --log-interval 10 \
    --save-interval $SAVE_INTERVAL \
    --eval-interval 5000 \
    --eval-iters 10 \
    --tensorboard-dir $TENSORBOARD_PATH \
    --tensorboard-queue-size 5 \
    --log-timers-to-tensorboard \
    --log-batch-size-to-tensorboard \
    --log-validation-ppl-to-tensorboard \
    "

ZERO_STAGE=0

mkdir -p ds_configs
DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"

# DeepSpeed config must agree with the Megatron batch-size arguments above.
cat <<EOF > "$DS_CONFIG_PATH"
{
    "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
    "train_batch_size": $GLOBAL_BATCH_SIZE,
    "gradient_clipping": 1.0,
    "zero_optimization": {
        "stage": $ZERO_STAGE
    },
    "bf16": {
        "enabled": true
    },
    "steps_per_print": 2000,
    "wall_clock_breakdown": false
}
EOF

DEEPSPEED_ARGS=" \
    --deepspeed \
    --deepspeed_config $DS_CONFIG_PATH \
    --zero-stage $ZERO_STAGE \
    "

CMD=" \
    Megatron-DeepSpeed/pretrain_gpt.py \
    --tensor-model-parallel-size $TP_SIZE \
    --pipeline-model-parallel-size $PP_SIZE \
    $GPT_ARGS \
    $OUTPUT_ARGS \
    --save $CHECKPOINT_PATH \
    --load $CHECKPOINT_PATH \
    --train-weighted-split-paths-path $TRAIN_DATA_PATH \
    --valid-weighted-split-paths-path $VALID_DATA_PATH \
    --data-impl mmap \
    $DEEPSPEED_ARGS \
    --seed 1 \
    "

echo "$CMD"

echo "START $SLURM_JOBID: $(date)"

# bash launch_srun.sh $CMD
# CMD is intentionally unquoted so it word-splits into separate arguments.
# shellcheck disable=SC2086
srun --label launch.sh $CMD

echo "END $SLURM_JOBID: $(date)"
sbatch_4b284b84b30c4pyseed2.sh ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
#SBATCH --exclude=nid007571,nid007112,nid006774,nid007502,nid007506,nid007507,nid005145,nid006692,nid007218,nid007123,nid006124,nid006123,nid007496,nid007237,nid006852,nid007206,nid006947,nid007212,nid006977,nid007222,nid005444,nid007219,nid007493,nid007221,nid005300,nid005619,nid006118,nid005203,nid006113,nid006481,nid007077,nid005208,nid005207,nid005879,nid005901
#SBATCH --nodes=32
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=40
#SBATCH --mem=256G
#SBATCH -p standard-g
#SBATCH -t 48:00:00
#SBATCH --gpus-per-node=mi250:8
#SBATCH --exclusive=user
#SBATCH --hint=nomultithread
#SBATCH --account=project_462000119
#SBATCH -o logs/%j.out
#SBATCH -e logs/%j.err

# Pretraining run: 84B tokens, 30% C4 / 70% Python data mix, seed 2.
VARIANT=4b284b84b30c4pyseed2

# If run without sbatch, submit ourselves (sbatch sets SLURM_JOB_ID).
# "${VAR:-}" keeps the test well-formed even when the variable is unset;
# the original unquoted test only worked by accident of one-arg `test`.
if [ -z "${SLURM_JOB_ID:-}" ]; then
    mkdir -p logs
    sbatch "$0"
    exit
fi

set -euo pipefail

# symlink logs/latest.out and logs/latest.err to this job's output
ln -f -s "$SLURM_JOB_ID.out" logs/latest.out
ln -f -s "$SLURM_JOB_ID.err" logs/latest.err

# Creating this file signals the training loop to checkpoint and exit
# cleanly (presumably — semantics live in Megatron-DeepSpeed; verify there).
KILL_SWITCH_PATH=kill-switch-$VARIANT
CHECKPOINT_PATH=checkpoints_$VARIANT
TENSORBOARD_PATH=tensorboard_$VARIANT

# Data
VOCAB_FILE="gpt2/vocab.json"
MERGE_FILE="gpt2/merges.txt"

TRAIN_DATA_PATH=train84b30c4py.txt
# "train: 0.3 0:1 /scratch/project_462000119/data/c4_subsampled/gpt2tok_c4_en_84B_text_document, 0.7 0:1 /scratch/project_462000119/data/python/gpt2tok_python_84B_content_document"
VALID_DATA_PATH=valc4py.txt
# "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document" "validation_python: 1.0 0.95:1 /scratch/project_462000119/data/python/gpt2tok_python_content_document"

# Parallelism: tensor-parallel 2, no pipeline parallelism.
PP_SIZE=1
TP_SIZE=2

MICRO_BATCH_SIZE=2
GRADIENT_ACCUMULATION_STEPS=1
WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))

# Model parameters (PARAM_4516M array is defined in model_params.sh:
# hidden size, FFN size, KV channels, heads, layers — in that order).
source model_params.sh
MODEL_PARAM=("${PARAM_4516M[@]}")
NHIDDEN=${MODEL_PARAM[0]}
FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
KV_SIZE=${MODEL_PARAM[2]}
NHEADS=${MODEL_PARAM[3]}
NLAYERS=${MODEL_PARAM[4]}
SEQ_LEN=2048

echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"

SAVE_INTERVAL=10000

# Tokens: 84_000_000_000
# -> Samples: 41_015_625.0  (84e9 tokens / 2048 tokens per sample)
TRAIN_SAMPLES=41_015_625

OPTIMIZER_ARGS=" \
    --optimizer adam \
    --adam-beta1 0.9 \
    --adam-beta2 0.95 \
    --adam-eps 1e-8 \
    --lr 2e-4 \
    --min-lr 2e-5 \
    --lr-decay-style cosine \
    --lr-decay-samples $TRAIN_SAMPLES \
    --lr-warmup-samples 410_156 \
    --clip-grad 1.0 \
    --weight-decay 1e-1 \
    "

# NOTE(review): --clip-grad appears both here and in OPTIMIZER_ARGS; argparse
# keeps the last occurrence, so this is redundant but harmless.
GPT_ARGS=" \
    --num-layers $NLAYERS \
    --hidden-size $NHIDDEN \
    --num-attention-heads $NHEADS \
    --kv-channels $KV_SIZE \
    --ffn-hidden-size $FFN_HIDDEN_SIZE \
    --seq-length $SEQ_LEN \
    --max-position-embeddings $SEQ_LEN \
    --micro-batch-size $MICRO_BATCH_SIZE \
    --global-batch-size $GLOBAL_BATCH_SIZE \
    --train-samples $TRAIN_SAMPLES \
    --vocab-file $VOCAB_FILE \
    --merge-file $MERGE_FILE \
    --clip-grad 1.0 \
    --kill-switch-path $KILL_SWITCH_PATH \
    --bf16 \
    $OPTIMIZER_ARGS \
    "

OUTPUT_ARGS=" \
    --log-interval 10 \
    --save-interval $SAVE_INTERVAL \
    --eval-interval 5000 \
    --eval-iters 10 \
    --tensorboard-dir $TENSORBOARD_PATH \
    --tensorboard-queue-size 5 \
    --log-timers-to-tensorboard \
    --log-batch-size-to-tensorboard \
    --log-validation-ppl-to-tensorboard \
    "

ZERO_STAGE=0

mkdir -p ds_configs
DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"

# DeepSpeed config must agree with the Megatron batch-size arguments above.
cat <<EOF > "$DS_CONFIG_PATH"
{
    "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
    "train_batch_size": $GLOBAL_BATCH_SIZE,
    "gradient_clipping": 1.0,
    "zero_optimization": {
        "stage": $ZERO_STAGE
    },
    "bf16": {
        "enabled": true
    },
    "steps_per_print": 2000,
    "wall_clock_breakdown": false
}
EOF

DEEPSPEED_ARGS=" \
    --deepspeed \
    --deepspeed_config $DS_CONFIG_PATH \
    --zero-stage $ZERO_STAGE \
    "

CMD=" \
    Megatron-DeepSpeed/pretrain_gpt.py \
    --tensor-model-parallel-size $TP_SIZE \
    --pipeline-model-parallel-size $PP_SIZE \
    $GPT_ARGS \
    $OUTPUT_ARGS \
    --save $CHECKPOINT_PATH \
    --load $CHECKPOINT_PATH \
    --train-weighted-split-paths-path $TRAIN_DATA_PATH \
    --valid-weighted-split-paths-path $VALID_DATA_PATH \
    --data-impl mmap \
    $DEEPSPEED_ARGS \
    --seed 2 \
    "

echo "$CMD"

echo "START $SLURM_JOBID: $(date)"

# bash launch_srun.sh $CMD
# CMD is intentionally unquoted so it word-splits into separate arguments.
# shellcheck disable=SC2086
srun --label launch.sh $CMD

echo "END $SLURM_JOBID: $(date)"
sbatch_4b284b84b30c4pyseed3.sh ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
#SBATCH --exclude=nid007571,nid007112,nid006774,nid007502,nid007506,nid007507,nid005145,nid006692,nid007218,nid007123,nid006124,nid006123,nid007496,nid007237,nid006852,nid007206,nid006947,nid007212,nid006977,nid007222,nid005444,nid007219,nid007493,nid007221,nid005300,nid005619,nid006118,nid005203,nid006113,nid006481,nid007077,nid005208,nid005207,nid005879,nid005901
#SBATCH --nodes=32
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=40
#SBATCH --mem=256G
#SBATCH -p standard-g
#SBATCH -t 48:00:00
#SBATCH --gpus-per-node=mi250:8
#SBATCH --exclusive=user
#SBATCH --hint=nomultithread
#SBATCH --account=project_462000119
#SBATCH -o logs/%j.out
#SBATCH -e logs/%j.err

# Pretraining run: 84B tokens, 30% C4 / 70% Python data mix, seed 3.
VARIANT=4b284b84b30c4pyseed3

# If run without sbatch, submit ourselves (sbatch sets SLURM_JOB_ID).
# "${VAR:-}" keeps the test well-formed even when the variable is unset;
# the original unquoted test only worked by accident of one-arg `test`.
if [ -z "${SLURM_JOB_ID:-}" ]; then
    mkdir -p logs
    sbatch "$0"
    exit
fi

set -euo pipefail

# symlink logs/latest.out and logs/latest.err to this job's output
ln -f -s "$SLURM_JOB_ID.out" logs/latest.out
ln -f -s "$SLURM_JOB_ID.err" logs/latest.err

# Creating this file signals the training loop to checkpoint and exit
# cleanly (presumably — semantics live in Megatron-DeepSpeed; verify there).
KILL_SWITCH_PATH=kill-switch-$VARIANT
CHECKPOINT_PATH=checkpoints_$VARIANT
TENSORBOARD_PATH=tensorboard_$VARIANT

# Data
VOCAB_FILE="gpt2/vocab.json"
MERGE_FILE="gpt2/merges.txt"

TRAIN_DATA_PATH=train84b30c4py.txt
# "train: 0.3 0:1 /scratch/project_462000119/data/c4_subsampled/gpt2tok_c4_en_84B_text_document, 0.7 0:1 /scratch/project_462000119/data/python/gpt2tok_python_84B_content_document"
VALID_DATA_PATH=valc4py.txt
# "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document" "validation_python: 1.0 0.95:1 /scratch/project_462000119/data/python/gpt2tok_python_content_document"

# Parallelism: tensor-parallel 2, no pipeline parallelism.
PP_SIZE=1
TP_SIZE=2

MICRO_BATCH_SIZE=2
GRADIENT_ACCUMULATION_STEPS=1
WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))

# Model parameters (PARAM_4516M array is defined in model_params.sh:
# hidden size, FFN size, KV channels, heads, layers — in that order).
source model_params.sh
MODEL_PARAM=("${PARAM_4516M[@]}")
NHIDDEN=${MODEL_PARAM[0]}
FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
KV_SIZE=${MODEL_PARAM[2]}
NHEADS=${MODEL_PARAM[3]}
NLAYERS=${MODEL_PARAM[4]}
SEQ_LEN=2048

echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"

SAVE_INTERVAL=10000

# Tokens: 84_000_000_000
# -> Samples: 41_015_625.0  (84e9 tokens / 2048 tokens per sample)
TRAIN_SAMPLES=41_015_625

OPTIMIZER_ARGS=" \
    --optimizer adam \
    --adam-beta1 0.9 \
    --adam-beta2 0.95 \
    --adam-eps 1e-8 \
    --lr 2e-4 \
    --min-lr 2e-5 \
    --lr-decay-style cosine \
    --lr-decay-samples $TRAIN_SAMPLES \
    --lr-warmup-samples 410_156 \
    --clip-grad 1.0 \
    --weight-decay 1e-1 \
    "

# NOTE(review): --clip-grad appears both here and in OPTIMIZER_ARGS; argparse
# keeps the last occurrence, so this is redundant but harmless.
GPT_ARGS=" \
    --num-layers $NLAYERS \
    --hidden-size $NHIDDEN \
    --num-attention-heads $NHEADS \
    --kv-channels $KV_SIZE \
    --ffn-hidden-size $FFN_HIDDEN_SIZE \
    --seq-length $SEQ_LEN \
    --max-position-embeddings $SEQ_LEN \
    --micro-batch-size $MICRO_BATCH_SIZE \
    --global-batch-size $GLOBAL_BATCH_SIZE \
    --train-samples $TRAIN_SAMPLES \
    --vocab-file $VOCAB_FILE \
    --merge-file $MERGE_FILE \
    --clip-grad 1.0 \
    --kill-switch-path $KILL_SWITCH_PATH \
    --bf16 \
    $OPTIMIZER_ARGS \
    "

OUTPUT_ARGS=" \
    --log-interval 10 \
    --save-interval $SAVE_INTERVAL \
    --eval-interval 5000 \
    --eval-iters 10 \
    --tensorboard-dir $TENSORBOARD_PATH \
    --tensorboard-queue-size 5 \
    --log-timers-to-tensorboard \
    --log-batch-size-to-tensorboard \
    --log-validation-ppl-to-tensorboard \
    "

ZERO_STAGE=0

mkdir -p ds_configs
DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"

# DeepSpeed config must agree with the Megatron batch-size arguments above.
cat <<EOF > "$DS_CONFIG_PATH"
{
    "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
    "train_batch_size": $GLOBAL_BATCH_SIZE,
    "gradient_clipping": 1.0,
    "zero_optimization": {
        "stage": $ZERO_STAGE
    },
    "bf16": {
        "enabled": true
    },
    "steps_per_print": 2000,
    "wall_clock_breakdown": false
}
EOF

DEEPSPEED_ARGS=" \
    --deepspeed \
    --deepspeed_config $DS_CONFIG_PATH \
    --zero-stage $ZERO_STAGE \
    "

CMD=" \
    Megatron-DeepSpeed/pretrain_gpt.py \
    --tensor-model-parallel-size $TP_SIZE \
    --pipeline-model-parallel-size $PP_SIZE \
    $GPT_ARGS \
    $OUTPUT_ARGS \
    --save $CHECKPOINT_PATH \
    --load $CHECKPOINT_PATH \
    --train-weighted-split-paths-path $TRAIN_DATA_PATH \
    --valid-weighted-split-paths-path $VALID_DATA_PATH \
    --data-impl mmap \
    $DEEPSPEED_ARGS \
    --seed 3 \
    "

echo "$CMD"

echo "START $SLURM_JOBID: $(date)"

# bash launch_srun.sh $CMD
# CMD is intentionally unquoted so it word-splits into separate arguments.
# shellcheck disable=SC2086
srun --label launch.sh $CMD

echo "END $SLURM_JOBID: $(date)"
sbatch_4b284b84b30c4pyseed4.sh ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ #SBATCH --exclude=nid007571,nid007112,nid006774,nid007502,nid007506,nid007507,nid005145,nid006692,nid007218,nid007123,nid006124,nid006123,nid007496,nid007237,nid006852,nid007206,nid006947,nid007212,nid006977,nid007222,nid005444,nid007219,nid007493,nid007221,nid005300,nid005619,nid006118,nid005203,nid006113,nid006481,nid007077,nid005208,nid005207,nid005879,nid005901
3
+ #SBATCH --nodes=32
4
+ #SBATCH --ntasks-per-node=1
5
+ #SBATCH --cpus-per-task=40
6
+ #SBATCH --mem=256G
7
+ #SBATCH -p standard-g
8
+ #SBATCH -t 48:00:00
9
+ #SBATCH --gpus-per-node=mi250:8
10
+ #SBATCH --exclusive=user
11
+ #SBATCH --hint=nomultithread
12
+ #SBATCH --account=project_462000119
13
+ #SBATCH -o logs/%j.out
14
+ #SBATCH -e logs/%j.err
15
+
16
+ VARIANT=4b284b84b30c4pyseed4
17
+
18
+ # if run without sbatch, invoke here
19
+ if [ -z $SLURM_JOB_ID ]; then
20
+ mkdir -p logs
21
+ sbatch "$0"
22
+ exit
23
+ fi
24
+
25
+ set -euo pipefail
26
+
27
+ # symlink logs/latest.out and logs/latest.err
28
+ ln -f -s $SLURM_JOB_ID.out logs/latest.out
29
+ ln -f -s $SLURM_JOB_ID.err logs/latest.err
30
+
31
+ KILL_SWITCH_PATH=kill-switch-$VARIANT
32
+ CHECKPOINT_PATH=checkpoints_$VARIANT
33
+ TENSORBOARD_PATH=tensorboard_$VARIANT
34
+
35
+ # Data
36
+ VOCAB_FILE="gpt2/vocab.json"
37
+ MERGE_FILE="gpt2/merges.txt"
38
+
39
+ TRAIN_DATA_PATH=train84b30c4py.txt
40
+ # "train: 0.3 0:1 /scratch/project_462000119/data/c4_subsampled/gpt2tok_c4_en_84B_text_document, 0.7 0:1 /scratch/project_462000119/data/python/gpt2tok_python_84B_content_document"
41
+ VALID_DATA_PATH=valc4py.txt
42
+ # "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document" "validation_python: 1.0 0.95:1 /scratch/project_462000119/data/python/gpt2tok_python_content_document"
43
+
44
+
45
+ PP_SIZE=1
46
+ TP_SIZE=2
47
+
48
+ MICRO_BATCH_SIZE=2
49
+ GRADIENT_ACCUMULATION_STEPS=1
50
+ WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
51
+ GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))
52
+
53
+ # Model parameters
54
+ source model_params.sh
55
+ MODEL_PARAM=("${PARAM_4516M[@]}")
56
+ NHIDDEN=${MODEL_PARAM[0]}
57
+ FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
58
+ KV_SIZE=${MODEL_PARAM[2]}
59
+ NHEADS=${MODEL_PARAM[3]}
60
+ NLAYERS=${MODEL_PARAM[4]}
61
+ SEQ_LEN=2048
62
+
63
+ echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"
64
+
65
+ SAVE_INTERVAL=10000
66
+
67
+ # Tokens: 84_000_000_000
68
+ # -> Samples: 41_015_625.0
69
+ TRAIN_SAMPLES=41_015_625
70
+
71
+ OPTIMIZER_ARGS=" \
72
+ --optimizer adam \
73
+ --adam-beta1 0.9 \
74
+ --adam-beta2 0.95 \
75
+ --adam-eps 1e-8 \
76
+ --lr 2e-4 \
77
+ --min-lr 2e-5 \
78
+ --lr-decay-style cosine \
79
+ --lr-decay-samples $TRAIN_SAMPLES \
80
+ --lr-warmup-samples 410_156 \
81
+ --clip-grad 1.0 \
82
+ --weight-decay 1e-1 \
83
+ "
84
+
85
+ GPT_ARGS=" \
86
+ --num-layers $NLAYERS \
87
+ --hidden-size $NHIDDEN \
88
+ --num-attention-heads $NHEADS \
89
+ --kv-channels $KV_SIZE \
90
+ --ffn-hidden-size $FFN_HIDDEN_SIZE \
91
+ --seq-length $SEQ_LEN \
92
+ --max-position-embeddings $SEQ_LEN \
93
+ --micro-batch-size $MICRO_BATCH_SIZE \
94
+ --global-batch-size $GLOBAL_BATCH_SIZE \
95
+ --train-samples $TRAIN_SAMPLES \
96
+ --vocab-file $VOCAB_FILE \
97
+ --merge-file $MERGE_FILE \
98
+ --clip-grad 1.0 \
99
+ --kill-switch-path $KILL_SWITCH_PATH \
100
+ --bf16 \
101
+ $OPTIMIZER_ARGS \
102
+ "
103
+
104
+ OUTPUT_ARGS=" \
105
+ --log-interval 10 \
106
+ --save-interval $SAVE_INTERVAL \
107
+ --eval-interval 5000 \
108
+ --eval-iters 10 \
109
+ --tensorboard-dir $TENSORBOARD_PATH \
110
+ --tensorboard-queue-size 5 \
111
+ --log-timers-to-tensorboard \
112
+ --log-batch-size-to-tensorboard \
113
+ --log-validation-ppl-to-tensorboard \
114
+ "
115
+
116
+ ZERO_STAGE=0
117
+
118
+ mkdir -p ds_configs
119
+ DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"
120
+
121
+ cat <<EOF > $DS_CONFIG_PATH
122
+ {
123
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
124
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
125
+ "gradient_clipping": 1.0,
126
+ "zero_optimization": {
127
+ "stage": $ZERO_STAGE
128
+ },
129
+ "bf16": {
130
+ "enabled": true
131
+ },
132
+ "steps_per_print": 2000,
133
+ "wall_clock_breakdown": false
134
+ }
135
+ EOF
136
+
137
+ DEEPSPEED_ARGS=" \
138
+ --deepspeed \
139
+ --deepspeed_config $DS_CONFIG_PATH \
140
+ --zero-stage $ZERO_STAGE \
141
+ "
142
+
143
+ CMD=" \
144
+ Megatron-DeepSpeed/pretrain_gpt.py \
145
+ --tensor-model-parallel-size $TP_SIZE \
146
+ --pipeline-model-parallel-size $PP_SIZE \
147
+ $GPT_ARGS \
148
+ $OUTPUT_ARGS \
149
+ --save $CHECKPOINT_PATH \
150
+ --load $CHECKPOINT_PATH \
151
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
152
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
153
+ --data-impl mmap \
154
+ $DEEPSPEED_ARGS \
155
+ --seed 4 \
156
+ "
157
+
158
+ echo $CMD
159
+
160
+ echo "START $SLURM_JOBID: $(date)"
161
+
162
+ # bash launch_srun.sh $CMD
163
+ srun --label launch.sh $CMD
164
+
165
+ echo "END $SLURM_JOBID: $(date)"
sbatch_4b284b84b40c4pyseed1.sh ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ #SBATCH --exclude=nid007571,nid007112,nid006774,nid007502,nid007506,nid007507,nid005145,nid006692,nid007218,nid007123,nid006124,nid006123,nid007496,nid007237,nid006852,nid007206,nid006947,nid007212,nid006977,nid007222,nid005444,nid007219,nid007493,nid007221,nid005300,nid005619,nid006118,nid005203,nid006113,nid006481,nid007077,nid005208,nid005207,nid005879,nid005901
3
+ #SBATCH --nodes=32
4
+ #SBATCH --ntasks-per-node=1
5
+ #SBATCH --cpus-per-task=40
6
+ #SBATCH --mem=256G
7
+ #SBATCH -p standard-g
8
+ #SBATCH -t 48:00:00
9
+ #SBATCH --gpus-per-node=mi250:8
10
+ #SBATCH --exclusive=user
11
+ #SBATCH --hint=nomultithread
12
+ #SBATCH --account=project_462000119
13
+ #SBATCH -o logs/%j.out
14
+ #SBATCH -e logs/%j.err
15
+
16
+ VARIANT=4b284b84b40c4pyseed1
17
+
18
+ # if run without sbatch, invoke here
19
+ if [ -z $SLURM_JOB_ID ]; then
20
+ mkdir -p logs
21
+ sbatch "$0"
22
+ exit
23
+ fi
24
+
25
+ set -euo pipefail
26
+
27
+ # symlink logs/latest.out and logs/latest.err
28
+ ln -f -s $SLURM_JOB_ID.out logs/latest.out
29
+ ln -f -s $SLURM_JOB_ID.err logs/latest.err
30
+
31
+ KILL_SWITCH_PATH=kill-switch-$VARIANT
32
+ CHECKPOINT_PATH=checkpoints_$VARIANT
33
+ TENSORBOARD_PATH=tensorboard_$VARIANT
34
+
35
+ # Data
36
+ VOCAB_FILE="gpt2/vocab.json"
37
+ MERGE_FILE="gpt2/merges.txt"
38
+
39
+ TRAIN_DATA_PATH=train84b40c4py.txt
40
+ # "train: 0.4 0:1 /scratch/project_462000119/data/c4_subsampled/gpt2tok_c4_en_84B_text_document, 0.6 0:1 /scratch/project_462000119/data/python/gpt2tok_python_84B_content_document"
41
+ VALID_DATA_PATH=valc4py.txt
42
+ # "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document" "validation_python: 1.0 0.95:1 /scratch/project_462000119/data/python/gpt2tok_python_content_document"
43
+
44
+
45
+ PP_SIZE=1
46
+ TP_SIZE=2
47
+
48
+ MICRO_BATCH_SIZE=2
49
+ GRADIENT_ACCUMULATION_STEPS=1
50
+ WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
51
+ GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))
52
+
53
+ # Model parameters
54
+ source model_params.sh
55
+ MODEL_PARAM=("${PARAM_4516M[@]}")
56
+ NHIDDEN=${MODEL_PARAM[0]}
57
+ FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
58
+ KV_SIZE=${MODEL_PARAM[2]}
59
+ NHEADS=${MODEL_PARAM[3]}
60
+ NLAYERS=${MODEL_PARAM[4]}
61
+ SEQ_LEN=2048
62
+
63
+ echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"
64
+
65
+ SAVE_INTERVAL=10000
66
+
67
+ # Tokens: 84_000_000_000
68
+ # -> Samples: 41_015_625.0
69
+ TRAIN_SAMPLES=41_015_625
70
+
71
+ OPTIMIZER_ARGS=" \
72
+ --optimizer adam \
73
+ --adam-beta1 0.9 \
74
+ --adam-beta2 0.95 \
75
+ --adam-eps 1e-8 \
76
+ --lr 2e-4 \
77
+ --min-lr 2e-5 \
78
+ --lr-decay-style cosine \
79
+ --lr-decay-samples $TRAIN_SAMPLES \
80
+ --lr-warmup-samples 410_156 \
81
+ --clip-grad 1.0 \
82
+ --weight-decay 1e-1 \
83
+ "
84
+
85
+ GPT_ARGS=" \
86
+ --num-layers $NLAYERS \
87
+ --hidden-size $NHIDDEN \
88
+ --num-attention-heads $NHEADS \
89
+ --kv-channels $KV_SIZE \
90
+ --ffn-hidden-size $FFN_HIDDEN_SIZE \
91
+ --seq-length $SEQ_LEN \
92
+ --max-position-embeddings $SEQ_LEN \
93
+ --micro-batch-size $MICRO_BATCH_SIZE \
94
+ --global-batch-size $GLOBAL_BATCH_SIZE \
95
+ --train-samples $TRAIN_SAMPLES \
96
+ --vocab-file $VOCAB_FILE \
97
+ --merge-file $MERGE_FILE \
98
+ --clip-grad 1.0 \
99
+ --kill-switch-path $KILL_SWITCH_PATH \
100
+ --bf16 \
101
+ $OPTIMIZER_ARGS \
102
+ "
103
+
104
+ OUTPUT_ARGS=" \
105
+ --log-interval 10 \
106
+ --save-interval $SAVE_INTERVAL \
107
+ --eval-interval 5000 \
108
+ --eval-iters 10 \
109
+ --tensorboard-dir $TENSORBOARD_PATH \
110
+ --tensorboard-queue-size 5 \
111
+ --log-timers-to-tensorboard \
112
+ --log-batch-size-to-tensorboard \
113
+ --log-validation-ppl-to-tensorboard \
114
+ "
115
+
116
+ ZERO_STAGE=0
117
+
118
+ mkdir -p ds_configs
119
+ DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"
120
+
121
+ cat <<EOF > $DS_CONFIG_PATH
122
+ {
123
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
124
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
125
+ "gradient_clipping": 1.0,
126
+ "zero_optimization": {
127
+ "stage": $ZERO_STAGE
128
+ },
129
+ "bf16": {
130
+ "enabled": true
131
+ },
132
+ "steps_per_print": 2000,
133
+ "wall_clock_breakdown": false
134
+ }
135
+ EOF
136
+
137
+ DEEPSPEED_ARGS=" \
138
+ --deepspeed \
139
+ --deepspeed_config $DS_CONFIG_PATH \
140
+ --zero-stage $ZERO_STAGE \
141
+ "
142
+
143
+ CMD=" \
144
+ Megatron-DeepSpeed/pretrain_gpt.py \
145
+ --tensor-model-parallel-size $TP_SIZE \
146
+ --pipeline-model-parallel-size $PP_SIZE \
147
+ $GPT_ARGS \
148
+ $OUTPUT_ARGS \
149
+ --save $CHECKPOINT_PATH \
150
+ --load $CHECKPOINT_PATH \
151
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
152
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
153
+ --data-impl mmap \
154
+ $DEEPSPEED_ARGS \
155
+ --seed 1 \
156
+ "
157
+
158
+ echo $CMD
159
+
160
+ echo "START $SLURM_JOBID: $(date)"
161
+
162
+ # bash launch_srun.sh $CMD
163
+ srun --label launch.sh $CMD
164
+
165
+ echo "END $SLURM_JOBID: $(date)"
sbatch_4b284b84b40c4pyseed2.sh ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ #SBATCH --exclude=nid007571,nid007112,nid006774,nid007502,nid007506,nid007507,nid005145,nid006692,nid007218,nid007123,nid006124,nid006123,nid007496,nid007237,nid006852,nid007206,nid006947,nid007212,nid006977,nid007222,nid005444,nid007219,nid007493,nid007221,nid005300,nid005619,nid006118,nid005203,nid006113,nid006481,nid007077,nid005208,nid005207,nid005879,nid005901
3
+ #SBATCH --nodes=32
4
+ #SBATCH --ntasks-per-node=1
5
+ #SBATCH --cpus-per-task=40
6
+ #SBATCH --mem=256G
7
+ #SBATCH -p standard-g
8
+ #SBATCH -t 48:00:00
9
+ #SBATCH --gpus-per-node=mi250:8
10
+ #SBATCH --exclusive=user
11
+ #SBATCH --hint=nomultithread
12
+ #SBATCH --account=project_462000119
13
+ #SBATCH -o logs/%j.out
14
+ #SBATCH -e logs/%j.err
15
+
16
+ VARIANT=4b284b84b40c4pyseed2
17
+
18
+ # if run without sbatch, invoke here
19
+ if [ -z $SLURM_JOB_ID ]; then
20
+ mkdir -p logs
21
+ sbatch "$0"
22
+ exit
23
+ fi
24
+
25
+ set -euo pipefail
26
+
27
+ # symlink logs/latest.out and logs/latest.err
28
+ ln -f -s $SLURM_JOB_ID.out logs/latest.out
29
+ ln -f -s $SLURM_JOB_ID.err logs/latest.err
30
+
31
+ KILL_SWITCH_PATH=kill-switch-$VARIANT
32
+ CHECKPOINT_PATH=checkpoints_$VARIANT
33
+ TENSORBOARD_PATH=tensorboard_$VARIANT
34
+
35
+ # Data
36
+ VOCAB_FILE="gpt2/vocab.json"
37
+ MERGE_FILE="gpt2/merges.txt"
38
+
39
+ TRAIN_DATA_PATH=train84b40c4py.txt
40
+ # "train: 0.4 0:1 /scratch/project_462000119/data/c4_subsampled/gpt2tok_c4_en_84B_text_document, 0.6 0:1 /scratch/project_462000119/data/python/gpt2tok_python_84B_content_document"
41
+ VALID_DATA_PATH=valc4py.txt
42
+ # "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document" "validation_python: 1.0 0.95:1 /scratch/project_462000119/data/python/gpt2tok_python_content_document"
43
+
44
+
45
+ PP_SIZE=1
46
+ TP_SIZE=2
47
+
48
+ MICRO_BATCH_SIZE=2
49
+ GRADIENT_ACCUMULATION_STEPS=1
50
+ WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
51
+ GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))
52
+
53
+ # Model parameters
54
+ source model_params.sh
55
+ MODEL_PARAM=("${PARAM_4516M[@]}")
56
+ NHIDDEN=${MODEL_PARAM[0]}
57
+ FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
58
+ KV_SIZE=${MODEL_PARAM[2]}
59
+ NHEADS=${MODEL_PARAM[3]}
60
+ NLAYERS=${MODEL_PARAM[4]}
61
+ SEQ_LEN=2048
62
+
63
+ echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"
64
+
65
+ SAVE_INTERVAL=10000
66
+
67
+ # Tokens: 84_000_000_000
68
+ # -> Samples: 41_015_625.0
69
+ TRAIN_SAMPLES=41_015_625
70
+
71
+ OPTIMIZER_ARGS=" \
72
+ --optimizer adam \
73
+ --adam-beta1 0.9 \
74
+ --adam-beta2 0.95 \
75
+ --adam-eps 1e-8 \
76
+ --lr 2e-4 \
77
+ --min-lr 2e-5 \
78
+ --lr-decay-style cosine \
79
+ --lr-decay-samples $TRAIN_SAMPLES \
80
+ --lr-warmup-samples 410_156 \
81
+ --clip-grad 1.0 \
82
+ --weight-decay 1e-1 \
83
+ "
84
+
85
+ GPT_ARGS=" \
86
+ --num-layers $NLAYERS \
87
+ --hidden-size $NHIDDEN \
88
+ --num-attention-heads $NHEADS \
89
+ --kv-channels $KV_SIZE \
90
+ --ffn-hidden-size $FFN_HIDDEN_SIZE \
91
+ --seq-length $SEQ_LEN \
92
+ --max-position-embeddings $SEQ_LEN \
93
+ --micro-batch-size $MICRO_BATCH_SIZE \
94
+ --global-batch-size $GLOBAL_BATCH_SIZE \
95
+ --train-samples $TRAIN_SAMPLES \
96
+ --vocab-file $VOCAB_FILE \
97
+ --merge-file $MERGE_FILE \
98
+ --clip-grad 1.0 \
99
+ --kill-switch-path $KILL_SWITCH_PATH \
100
+ --bf16 \
101
+ $OPTIMIZER_ARGS \
102
+ "
103
+
104
+ OUTPUT_ARGS=" \
105
+ --log-interval 10 \
106
+ --save-interval $SAVE_INTERVAL \
107
+ --eval-interval 5000 \
108
+ --eval-iters 10 \
109
+ --tensorboard-dir $TENSORBOARD_PATH \
110
+ --tensorboard-queue-size 5 \
111
+ --log-timers-to-tensorboard \
112
+ --log-batch-size-to-tensorboard \
113
+ --log-validation-ppl-to-tensorboard \
114
+ "
115
+
116
+ ZERO_STAGE=0
117
+
118
+ mkdir -p ds_configs
119
+ DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"
120
+
121
+ cat <<EOF > $DS_CONFIG_PATH
122
+ {
123
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
124
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
125
+ "gradient_clipping": 1.0,
126
+ "zero_optimization": {
127
+ "stage": $ZERO_STAGE
128
+ },
129
+ "bf16": {
130
+ "enabled": true
131
+ },
132
+ "steps_per_print": 2000,
133
+ "wall_clock_breakdown": false
134
+ }
135
+ EOF
136
+
137
+ DEEPSPEED_ARGS=" \
138
+ --deepspeed \
139
+ --deepspeed_config $DS_CONFIG_PATH \
140
+ --zero-stage $ZERO_STAGE \
141
+ "
142
+
143
+ CMD=" \
144
+ Megatron-DeepSpeed/pretrain_gpt.py \
145
+ --tensor-model-parallel-size $TP_SIZE \
146
+ --pipeline-model-parallel-size $PP_SIZE \
147
+ $GPT_ARGS \
148
+ $OUTPUT_ARGS \
149
+ --save $CHECKPOINT_PATH \
150
+ --load $CHECKPOINT_PATH \
151
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
152
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
153
+ --data-impl mmap \
154
+ $DEEPSPEED_ARGS \
155
+ --seed 2 \
156
+ "
157
+
158
+ echo $CMD
159
+
160
+ echo "START $SLURM_JOBID: $(date)"
161
+
162
+ # bash launch_srun.sh $CMD
163
+ srun --label launch.sh $CMD
164
+
165
+ echo "END $SLURM_JOBID: $(date)"
sbatch_4b284b84b40c4pyseed3.sh ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ #SBATCH --exclude=nid007571,nid007112,nid006774,nid007502,nid007506,nid007507,nid005145,nid006692,nid007218,nid007123,nid006124,nid006123,nid007496,nid007237,nid006852,nid007206,nid006947,nid007212,nid006977,nid007222,nid005444,nid007219,nid007493,nid007221,nid005300,nid005619,nid006118,nid005203,nid006113,nid006481,nid007077,nid005208,nid005207,nid005879,nid005901
3
+ #SBATCH --nodes=32
4
+ #SBATCH --ntasks-per-node=1
5
+ #SBATCH --cpus-per-task=40
6
+ #SBATCH --mem=256G
7
+ #SBATCH -p standard-g
8
+ #SBATCH -t 48:00:00
9
+ #SBATCH --gpus-per-node=mi250:8
10
+ #SBATCH --exclusive=user
11
+ #SBATCH --hint=nomultithread
12
+ #SBATCH --account=project_462000119
13
+ #SBATCH -o logs/%j.out
14
+ #SBATCH -e logs/%j.err
15
+
16
+ VARIANT=4b284b84b40c4pyseed3
17
+
18
+ # if run without sbatch, invoke here
19
+ if [ -z $SLURM_JOB_ID ]; then
20
+ mkdir -p logs
21
+ sbatch "$0"
22
+ exit
23
+ fi
24
+
25
+ set -euo pipefail
26
+
27
+ # symlink logs/latest.out and logs/latest.err
28
+ ln -f -s $SLURM_JOB_ID.out logs/latest.out
29
+ ln -f -s $SLURM_JOB_ID.err logs/latest.err
30
+
31
+ KILL_SWITCH_PATH=kill-switch-$VARIANT
32
+ CHECKPOINT_PATH=checkpoints_$VARIANT
33
+ TENSORBOARD_PATH=tensorboard_$VARIANT
34
+
35
+ # Data
36
+ VOCAB_FILE="gpt2/vocab.json"
37
+ MERGE_FILE="gpt2/merges.txt"
38
+
39
+ TRAIN_DATA_PATH=train84b40c4py.txt
40
+ # "train: 0.4 0:1 /scratch/project_462000119/data/c4_subsampled/gpt2tok_c4_en_84B_text_document, 0.6 0:1 /scratch/project_462000119/data/python/gpt2tok_python_84B_content_document"
41
+ VALID_DATA_PATH=valc4py.txt
42
+ # "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document" "validation_python: 1.0 0.95:1 /scratch/project_462000119/data/python/gpt2tok_python_content_document"
43
+
44
+
45
+ PP_SIZE=1
46
+ TP_SIZE=2
47
+
48
+ MICRO_BATCH_SIZE=2
49
+ GRADIENT_ACCUMULATION_STEPS=1
50
+ WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
51
+ GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))
52
+
53
+ # Model parameters
54
+ source model_params.sh
55
+ MODEL_PARAM=("${PARAM_4516M[@]}")
56
+ NHIDDEN=${MODEL_PARAM[0]}
57
+ FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
58
+ KV_SIZE=${MODEL_PARAM[2]}
59
+ NHEADS=${MODEL_PARAM[3]}
60
+ NLAYERS=${MODEL_PARAM[4]}
61
+ SEQ_LEN=2048
62
+
63
+ echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"
64
+
65
+ SAVE_INTERVAL=10000
66
+
67
+ # Tokens: 84_000_000_000
68
+ # -> Samples: 41_015_625.0
69
+ TRAIN_SAMPLES=41_015_625
70
+
71
+ OPTIMIZER_ARGS=" \
72
+ --optimizer adam \
73
+ --adam-beta1 0.9 \
74
+ --adam-beta2 0.95 \
75
+ --adam-eps 1e-8 \
76
+ --lr 2e-4 \
77
+ --min-lr 2e-5 \
78
+ --lr-decay-style cosine \
79
+ --lr-decay-samples $TRAIN_SAMPLES \
80
+ --lr-warmup-samples 410_156 \
81
+ --clip-grad 1.0 \
82
+ --weight-decay 1e-1 \
83
+ "
84
+
85
+ GPT_ARGS=" \
86
+ --num-layers $NLAYERS \
87
+ --hidden-size $NHIDDEN \
88
+ --num-attention-heads $NHEADS \
89
+ --kv-channels $KV_SIZE \
90
+ --ffn-hidden-size $FFN_HIDDEN_SIZE \
91
+ --seq-length $SEQ_LEN \
92
+ --max-position-embeddings $SEQ_LEN \
93
+ --micro-batch-size $MICRO_BATCH_SIZE \
94
+ --global-batch-size $GLOBAL_BATCH_SIZE \
95
+ --train-samples $TRAIN_SAMPLES \
96
+ --vocab-file $VOCAB_FILE \
97
+ --merge-file $MERGE_FILE \
98
+ --clip-grad 1.0 \
99
+ --kill-switch-path $KILL_SWITCH_PATH \
100
+ --bf16 \
101
+ $OPTIMIZER_ARGS \
102
+ "
103
+
104
+ OUTPUT_ARGS=" \
105
+ --log-interval 10 \
106
+ --save-interval $SAVE_INTERVAL \
107
+ --eval-interval 5000 \
108
+ --eval-iters 10 \
109
+ --tensorboard-dir $TENSORBOARD_PATH \
110
+ --tensorboard-queue-size 5 \
111
+ --log-timers-to-tensorboard \
112
+ --log-batch-size-to-tensorboard \
113
+ --log-validation-ppl-to-tensorboard \
114
+ "
115
+
116
+ ZERO_STAGE=0
117
+
118
+ mkdir -p ds_configs
119
+ DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"
120
+
121
+ cat <<EOF > $DS_CONFIG_PATH
122
+ {
123
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
124
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
125
+ "gradient_clipping": 1.0,
126
+ "zero_optimization": {
127
+ "stage": $ZERO_STAGE
128
+ },
129
+ "bf16": {
130
+ "enabled": true
131
+ },
132
+ "steps_per_print": 2000,
133
+ "wall_clock_breakdown": false
134
+ }
135
+ EOF
136
+
137
+ DEEPSPEED_ARGS=" \
138
+ --deepspeed \
139
+ --deepspeed_config $DS_CONFIG_PATH \
140
+ --zero-stage $ZERO_STAGE \
141
+ "
142
+
143
+ CMD=" \
144
+ Megatron-DeepSpeed/pretrain_gpt.py \
145
+ --tensor-model-parallel-size $TP_SIZE \
146
+ --pipeline-model-parallel-size $PP_SIZE \
147
+ $GPT_ARGS \
148
+ $OUTPUT_ARGS \
149
+ --save $CHECKPOINT_PATH \
150
+ --load $CHECKPOINT_PATH \
151
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
152
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
153
+ --data-impl mmap \
154
+ $DEEPSPEED_ARGS \
155
+ --seed 3 \
156
+ "
157
+
158
+ echo $CMD
159
+
160
+ echo "START $SLURM_JOBID: $(date)"
161
+
162
+ # bash launch_srun.sh $CMD
163
+ srun --label launch.sh $CMD
164
+
165
+ echo "END $SLURM_JOBID: $(date)"
sbatch_4b284b84b40c4pyseed4.sh ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ #SBATCH --exclude=nid007571,nid007112,nid006774,nid007502,nid007506,nid007507,nid005145,nid006692,nid007218,nid007123,nid006124,nid006123,nid007496,nid007237,nid006852,nid007206,nid006947,nid007212,nid006977,nid007222,nid005444,nid007219,nid007493,nid007221,nid005300,nid005619,nid006118,nid005203,nid006113,nid006481,nid007077,nid005208,nid005207,nid005879,nid005901
3
+ #SBATCH --nodes=32
4
+ #SBATCH --ntasks-per-node=1
5
+ #SBATCH --cpus-per-task=40
6
+ #SBATCH --mem=256G
7
+ #SBATCH -p standard-g
8
+ #SBATCH -t 48:00:00
9
+ #SBATCH --gpus-per-node=mi250:8
10
+ #SBATCH --exclusive=user
11
+ #SBATCH --hint=nomultithread
12
+ #SBATCH --account=project_462000119
13
+ #SBATCH -o logs/%j.out
14
+ #SBATCH -e logs/%j.err
15
+
16
+ VARIANT=4b284b84b40c4pyseed4
17
+
18
+ # if run without sbatch, invoke here
19
+ if [ -z $SLURM_JOB_ID ]; then
20
+ mkdir -p logs
21
+ sbatch "$0"
22
+ exit
23
+ fi
24
+
25
+ set -euo pipefail
26
+
27
+ # symlink logs/latest.out and logs/latest.err
28
+ ln -f -s $SLURM_JOB_ID.out logs/latest.out
29
+ ln -f -s $SLURM_JOB_ID.err logs/latest.err
30
+
31
+ KILL_SWITCH_PATH=kill-switch-$VARIANT
32
+ CHECKPOINT_PATH=checkpoints_$VARIANT
33
+ TENSORBOARD_PATH=tensorboard_$VARIANT
34
+
35
+ # Data
36
+ VOCAB_FILE="gpt2/vocab.json"
37
+ MERGE_FILE="gpt2/merges.txt"
38
+
39
+ TRAIN_DATA_PATH=train84b40c4py.txt
40
+ # "train: 0.4 0:1 /scratch/project_462000119/data/c4_subsampled/gpt2tok_c4_en_84B_text_document, 0.6 0:1 /scratch/project_462000119/data/python/gpt2tok_python_84B_content_document"
41
+ VALID_DATA_PATH=valc4py.txt
42
+ # "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document" "validation_python: 1.0 0.95:1 /scratch/project_462000119/data/python/gpt2tok_python_content_document"
43
+
44
+
45
+ PP_SIZE=1
46
+ TP_SIZE=2
47
+
48
+ MICRO_BATCH_SIZE=2
49
+ GRADIENT_ACCUMULATION_STEPS=1
50
+ WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
51
+ GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))
52
+
53
+ # Model parameters
54
+ source model_params.sh
55
+ MODEL_PARAM=("${PARAM_4516M[@]}")
56
+ NHIDDEN=${MODEL_PARAM[0]}
57
+ FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
58
+ KV_SIZE=${MODEL_PARAM[2]}
59
+ NHEADS=${MODEL_PARAM[3]}
60
+ NLAYERS=${MODEL_PARAM[4]}
61
+ SEQ_LEN=2048
62
+
63
+ echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"
64
+
65
+ SAVE_INTERVAL=10000
66
+
67
+ # Tokens: 84_000_000_000
68
+ # -> Samples: 41_015_625.0
69
+ TRAIN_SAMPLES=41_015_625
70
+
71
+ OPTIMIZER_ARGS=" \
72
+ --optimizer adam \
73
+ --adam-beta1 0.9 \
74
+ --adam-beta2 0.95 \
75
+ --adam-eps 1e-8 \
76
+ --lr 2e-4 \
77
+ --min-lr 2e-5 \
78
+ --lr-decay-style cosine \
79
+ --lr-decay-samples $TRAIN_SAMPLES \
80
+ --lr-warmup-samples 410_156 \
81
+ --clip-grad 1.0 \
82
+ --weight-decay 1e-1 \
83
+ "
84
+
85
+ GPT_ARGS=" \
86
+ --num-layers $NLAYERS \
87
+ --hidden-size $NHIDDEN \
88
+ --num-attention-heads $NHEADS \
89
+ --kv-channels $KV_SIZE \
90
+ --ffn-hidden-size $FFN_HIDDEN_SIZE \
91
+ --seq-length $SEQ_LEN \
92
+ --max-position-embeddings $SEQ_LEN \
93
+ --micro-batch-size $MICRO_BATCH_SIZE \
94
+ --global-batch-size $GLOBAL_BATCH_SIZE \
95
+ --train-samples $TRAIN_SAMPLES \
96
+ --vocab-file $VOCAB_FILE \
97
+ --merge-file $MERGE_FILE \
98
+ --clip-grad 1.0 \
99
+ --kill-switch-path $KILL_SWITCH_PATH \
100
+ --bf16 \
101
+ $OPTIMIZER_ARGS \
102
+ "
103
+
104
+ OUTPUT_ARGS=" \
105
+ --log-interval 10 \
106
+ --save-interval $SAVE_INTERVAL \
107
+ --eval-interval 5000 \
108
+ --eval-iters 10 \
109
+ --tensorboard-dir $TENSORBOARD_PATH \
110
+ --tensorboard-queue-size 5 \
111
+ --log-timers-to-tensorboard \
112
+ --log-batch-size-to-tensorboard \
113
+ --log-validation-ppl-to-tensorboard \
114
+ "
115
+
116
+ ZERO_STAGE=0
117
+
118
+ mkdir -p ds_configs
119
+ DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"
120
+
121
+ cat <<EOF > $DS_CONFIG_PATH
122
+ {
123
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
124
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
125
+ "gradient_clipping": 1.0,
126
+ "zero_optimization": {
127
+ "stage": $ZERO_STAGE
128
+ },
129
+ "bf16": {
130
+ "enabled": true
131
+ },
132
+ "steps_per_print": 2000,
133
+ "wall_clock_breakdown": false
134
+ }
135
+ EOF
136
+
137
+ DEEPSPEED_ARGS=" \
138
+ --deepspeed \
139
+ --deepspeed_config $DS_CONFIG_PATH \
140
+ --zero-stage $ZERO_STAGE \
141
+ "
142
+
143
+ CMD=" \
144
+ Megatron-DeepSpeed/pretrain_gpt.py \
145
+ --tensor-model-parallel-size $TP_SIZE \
146
+ --pipeline-model-parallel-size $PP_SIZE \
147
+ $GPT_ARGS \
148
+ $OUTPUT_ARGS \
149
+ --save $CHECKPOINT_PATH \
150
+ --load $CHECKPOINT_PATH \
151
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
152
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
153
+ --data-impl mmap \
154
+ $DEEPSPEED_ARGS \
155
+ --seed 4 \
156
+ "
157
+
158
+ echo $CMD
159
+
160
+ echo "START $SLURM_JOBID: $(date)"
161
+
162
+ # bash launch_srun.sh $CMD
163
+ srun --label launch.sh $CMD
164
+
165
+ echo "END $SLURM_JOBID: $(date)"
sbatch_4b284b84b50c4pyseed1.sh ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ #SBATCH --exclude=nid007571,nid007112,nid006774,nid007502,nid007506,nid007507,nid005145,nid006692,nid007218,nid007123,nid006124,nid006123,nid007496,nid007237,nid006852,nid007206,nid006947,nid007212,nid006977,nid007222,nid005444,nid007219,nid007493,nid007221,nid005300,nid005619,nid006118,nid005203,nid006113,nid006481,nid007077,nid005208,nid005207,nid005879,nid005901
3
+ #SBATCH --nodes=32
4
+ #SBATCH --ntasks-per-node=1
5
+ #SBATCH --cpus-per-task=40
6
+ #SBATCH --mem=256G
7
+ #SBATCH -p standard-g
8
+ #SBATCH -t 48:00:00
9
+ #SBATCH --gpus-per-node=mi250:8
10
+ #SBATCH --exclusive=user
11
+ #SBATCH --hint=nomultithread
12
+ #SBATCH --account=project_462000119
13
+ #SBATCH -o logs/%j.out
14
+ #SBATCH -e logs/%j.err
15
+
16
+ VARIANT=4b284b84b50c4pyseed1
17
+
18
+ # if run without sbatch, invoke here
19
+ if [ -z $SLURM_JOB_ID ]; then
20
+ mkdir -p logs
21
+ sbatch "$0"
22
+ exit
23
+ fi
24
+
25
+ set -euo pipefail
26
+
27
+ # symlink logs/latest.out and logs/latest.err
28
+ ln -f -s $SLURM_JOB_ID.out logs/latest.out
29
+ ln -f -s $SLURM_JOB_ID.err logs/latest.err
30
+
31
+ KILL_SWITCH_PATH=kill-switch-$VARIANT
32
+ CHECKPOINT_PATH=checkpoints_$VARIANT
33
+ TENSORBOARD_PATH=tensorboard_$VARIANT
34
+
35
+ # Data
36
+ VOCAB_FILE="gpt2/vocab.json"
37
+ MERGE_FILE="gpt2/merges.txt"
38
+
39
+ TRAIN_DATA_PATH=train84b50c4py.txt
40
+ # "train: 0.5 0:1 /scratch/project_462000119/data/c4_subsampled/gpt2tok_c4_en_84B_text_document, 0.5 0:1 /scratch/project_462000119/data/python/gpt2tok_python_84B_content_document"
41
+ VALID_DATA_PATH=valc4py.txt
42
+ # "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document" "validation_python: 1.0 0.95:1 /scratch/project_462000119/data/python/gpt2tok_python_content_document"
43
+
44
+
45
+ PP_SIZE=1
46
+ TP_SIZE=2
47
+
48
+ MICRO_BATCH_SIZE=2
49
+ GRADIENT_ACCUMULATION_STEPS=1
50
+ WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
51
+ GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))
52
+
53
+ # Model parameters
54
+ source model_params.sh
55
+ MODEL_PARAM=("${PARAM_4516M[@]}")
56
+ NHIDDEN=${MODEL_PARAM[0]}
57
+ FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
58
+ KV_SIZE=${MODEL_PARAM[2]}
59
+ NHEADS=${MODEL_PARAM[3]}
60
+ NLAYERS=${MODEL_PARAM[4]}
61
+ SEQ_LEN=2048
62
+
63
+ echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"
64
+
65
+ SAVE_INTERVAL=10000
66
+
67
+ # Tokens: 84_000_000_000
68
+ # -> Samples: 41_015_625.0
69
+ TRAIN_SAMPLES=41_015_625
70
+
71
+ OPTIMIZER_ARGS=" \
72
+ --optimizer adam \
73
+ --adam-beta1 0.9 \
74
+ --adam-beta2 0.95 \
75
+ --adam-eps 1e-8 \
76
+ --lr 2e-4 \
77
+ --min-lr 2e-5 \
78
+ --lr-decay-style cosine \
79
+ --lr-decay-samples $TRAIN_SAMPLES \
80
+ --lr-warmup-samples 410_156 \
81
+ --clip-grad 1.0 \
82
+ --weight-decay 1e-1 \
83
+ "
84
+
85
+ GPT_ARGS=" \
86
+ --num-layers $NLAYERS \
87
+ --hidden-size $NHIDDEN \
88
+ --num-attention-heads $NHEADS \
89
+ --kv-channels $KV_SIZE \
90
+ --ffn-hidden-size $FFN_HIDDEN_SIZE \
91
+ --seq-length $SEQ_LEN \
92
+ --max-position-embeddings $SEQ_LEN \
93
+ --micro-batch-size $MICRO_BATCH_SIZE \
94
+ --global-batch-size $GLOBAL_BATCH_SIZE \
95
+ --train-samples $TRAIN_SAMPLES \
96
+ --vocab-file $VOCAB_FILE \
97
+ --merge-file $MERGE_FILE \
98
+ --clip-grad 1.0 \
99
+ --kill-switch-path $KILL_SWITCH_PATH \
100
+ --bf16 \
101
+ $OPTIMIZER_ARGS \
102
+ "
103
+
104
+ OUTPUT_ARGS=" \
105
+ --log-interval 10 \
106
+ --save-interval $SAVE_INTERVAL \
107
+ --eval-interval 5000 \
108
+ --eval-iters 10 \
109
+ --tensorboard-dir $TENSORBOARD_PATH \
110
+ --tensorboard-queue-size 5 \
111
+ --log-timers-to-tensorboard \
112
+ --log-batch-size-to-tensorboard \
113
+ --log-validation-ppl-to-tensorboard \
114
+ "
115
+
116
+ ZERO_STAGE=0
117
+
118
+ mkdir -p ds_configs
119
+ DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"
120
+
121
+ cat <<EOF > $DS_CONFIG_PATH
122
+ {
123
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
124
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
125
+ "gradient_clipping": 1.0,
126
+ "zero_optimization": {
127
+ "stage": $ZERO_STAGE
128
+ },
129
+ "bf16": {
130
+ "enabled": true
131
+ },
132
+ "steps_per_print": 2000,
133
+ "wall_clock_breakdown": false
134
+ }
135
+ EOF
136
+
137
+ DEEPSPEED_ARGS=" \
138
+ --deepspeed \
139
+ --deepspeed_config $DS_CONFIG_PATH \
140
+ --zero-stage $ZERO_STAGE \
141
+ "
142
+
143
+ CMD=" \
144
+ Megatron-DeepSpeed/pretrain_gpt.py \
145
+ --tensor-model-parallel-size $TP_SIZE \
146
+ --pipeline-model-parallel-size $PP_SIZE \
147
+ $GPT_ARGS \
148
+ $OUTPUT_ARGS \
149
+ --save $CHECKPOINT_PATH \
150
+ --load $CHECKPOINT_PATH \
151
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
152
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
153
+ --data-impl mmap \
154
+ $DEEPSPEED_ARGS \
155
+ --seed 1 \
156
+ "
157
+
158
+ echo $CMD
159
+
160
+ echo "START $SLURM_JOBID: $(date)"
161
+
162
+ # bash launch_srun.sh $CMD
163
+ srun --label launch.sh $CMD
164
+
165
+ echo "END $SLURM_JOBID: $(date)"
sbatch_4b284b84b50c4pyseed2.sh ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ #SBATCH --exclude=nid007571,nid007112,nid006774,nid007502,nid007506,nid007507,nid005145,nid006692,nid007218,nid007123,nid006124,nid006123,nid007496,nid007237,nid006852,nid007206,nid006947,nid007212,nid006977,nid007222,nid005444,nid007219,nid007493,nid007221,nid005300,nid005619,nid006118,nid005203,nid006113,nid006481,nid007077,nid005208,nid005207,nid005879,nid005901
3
+ #SBATCH --nodes=32
4
+ #SBATCH --ntasks-per-node=1
5
+ #SBATCH --cpus-per-task=40
6
+ #SBATCH --mem=256G
7
+ #SBATCH -p standard-g
8
+ #SBATCH -t 48:00:00
9
+ #SBATCH --gpus-per-node=mi250:8
10
+ #SBATCH --exclusive=user
11
+ #SBATCH --hint=nomultithread
12
+ #SBATCH --account=project_462000119
13
+ #SBATCH -o logs/%j.out
14
+ #SBATCH -e logs/%j.err
15
+
16
+ VARIANT=4b284b84b50c4pyseed2
17
+
18
+ # if run without sbatch, invoke here
19
+ if [ -z $SLURM_JOB_ID ]; then
20
+ mkdir -p logs
21
+ sbatch "$0"
22
+ exit
23
+ fi
24
+
25
+ set -euo pipefail
26
+
27
+ # symlink logs/latest.out and logs/latest.err
28
+ ln -f -s $SLURM_JOB_ID.out logs/latest.out
29
+ ln -f -s $SLURM_JOB_ID.err logs/latest.err
30
+
31
+ KILL_SWITCH_PATH=kill-switch-$VARIANT
32
+ CHECKPOINT_PATH=checkpoints_$VARIANT
33
+ TENSORBOARD_PATH=tensorboard_$VARIANT
34
+
35
+ # Data
36
+ VOCAB_FILE="gpt2/vocab.json"
37
+ MERGE_FILE="gpt2/merges.txt"
38
+
39
+ TRAIN_DATA_PATH=train84b50c4py.txt
40
+ # "train: 0.5 0:1 /scratch/project_462000119/data/c4_subsampled/gpt2tok_c4_en_84B_text_document, 0.5 0:1 /scratch/project_462000119/data/python/gpt2tok_python_84B_content_document"
41
+ VALID_DATA_PATH=valc4py.txt
42
+ # "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document" "validation_python: 1.0 0.95:1 /scratch/project_462000119/data/python/gpt2tok_python_content_document"
43
+
44
+
45
+ PP_SIZE=1
46
+ TP_SIZE=2
47
+
48
+ MICRO_BATCH_SIZE=2
49
+ GRADIENT_ACCUMULATION_STEPS=1
50
+ WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
51
+ GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))
52
+
53
+ # Model parameters
54
+ source model_params.sh
55
+ MODEL_PARAM=("${PARAM_4516M[@]}")
56
+ NHIDDEN=${MODEL_PARAM[0]}
57
+ FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
58
+ KV_SIZE=${MODEL_PARAM[2]}
59
+ NHEADS=${MODEL_PARAM[3]}
60
+ NLAYERS=${MODEL_PARAM[4]}
61
+ SEQ_LEN=2048
62
+
63
+ echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"
64
+
65
+ SAVE_INTERVAL=10000
66
+
67
+ # Tokens: 84_000_000_000
68
+ # -> Samples: 41_015_625.0
69
+ TRAIN_SAMPLES=41_015_625
70
+
71
+ OPTIMIZER_ARGS=" \
72
+ --optimizer adam \
73
+ --adam-beta1 0.9 \
74
+ --adam-beta2 0.95 \
75
+ --adam-eps 1e-8 \
76
+ --lr 2e-4 \
77
+ --min-lr 2e-5 \
78
+ --lr-decay-style cosine \
79
+ --lr-decay-samples $TRAIN_SAMPLES \
80
+ --lr-warmup-samples 410_156 \
81
+ --clip-grad 1.0 \
82
+ --weight-decay 1e-1 \
83
+ "
84
+
85
+ GPT_ARGS=" \
86
+ --num-layers $NLAYERS \
87
+ --hidden-size $NHIDDEN \
88
+ --num-attention-heads $NHEADS \
89
+ --kv-channels $KV_SIZE \
90
+ --ffn-hidden-size $FFN_HIDDEN_SIZE \
91
+ --seq-length $SEQ_LEN \
92
+ --max-position-embeddings $SEQ_LEN \
93
+ --micro-batch-size $MICRO_BATCH_SIZE \
94
+ --global-batch-size $GLOBAL_BATCH_SIZE \
95
+ --train-samples $TRAIN_SAMPLES \
96
+ --vocab-file $VOCAB_FILE \
97
+ --merge-file $MERGE_FILE \
98
+ --clip-grad 1.0 \
99
+ --kill-switch-path $KILL_SWITCH_PATH \
100
+ --bf16 \
101
+ $OPTIMIZER_ARGS \
102
+ "
103
+
104
+ OUTPUT_ARGS=" \
105
+ --log-interval 10 \
106
+ --save-interval $SAVE_INTERVAL \
107
+ --eval-interval 5000 \
108
+ --eval-iters 10 \
109
+ --tensorboard-dir $TENSORBOARD_PATH \
110
+ --tensorboard-queue-size 5 \
111
+ --log-timers-to-tensorboard \
112
+ --log-batch-size-to-tensorboard \
113
+ --log-validation-ppl-to-tensorboard \
114
+ "
115
+
116
+ ZERO_STAGE=0
117
+
118
+ mkdir -p ds_configs
119
+ DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"
120
+
121
+ cat <<EOF > $DS_CONFIG_PATH
122
+ {
123
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
124
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
125
+ "gradient_clipping": 1.0,
126
+ "zero_optimization": {
127
+ "stage": $ZERO_STAGE
128
+ },
129
+ "bf16": {
130
+ "enabled": true
131
+ },
132
+ "steps_per_print": 2000,
133
+ "wall_clock_breakdown": false
134
+ }
135
+ EOF
136
+
137
+ DEEPSPEED_ARGS=" \
138
+ --deepspeed \
139
+ --deepspeed_config $DS_CONFIG_PATH \
140
+ --zero-stage $ZERO_STAGE \
141
+ "
142
+
143
+ CMD=" \
144
+ Megatron-DeepSpeed/pretrain_gpt.py \
145
+ --tensor-model-parallel-size $TP_SIZE \
146
+ --pipeline-model-parallel-size $PP_SIZE \
147
+ $GPT_ARGS \
148
+ $OUTPUT_ARGS \
149
+ --save $CHECKPOINT_PATH \
150
+ --load $CHECKPOINT_PATH \
151
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
152
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
153
+ --data-impl mmap \
154
+ $DEEPSPEED_ARGS \
155
+ --seed 2 \
156
+ "
157
+
158
+ echo $CMD
159
+
160
+ echo "START $SLURM_JOBID: $(date)"
161
+
162
+ # bash launch_srun.sh $CMD
163
+ srun --label launch.sh $CMD
164
+
165
+ echo "END $SLURM_JOBID: $(date)"
sbatch_4b284b84b50c4pyseed3.sh ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ #SBATCH --exclude=nid007571,nid007112,nid006774,nid007502,nid007506,nid007507,nid005145,nid006692,nid007218,nid007123,nid006124,nid006123,nid007496,nid007237,nid006852,nid007206,nid006947,nid007212,nid006977,nid007222,nid005444,nid007219,nid007493,nid007221,nid005300,nid005619,nid006118,nid005203,nid006113,nid006481,nid007077,nid005208,nid005207,nid005879,nid005901
3
+ #SBATCH --nodes=32
4
+ #SBATCH --ntasks-per-node=1
5
+ #SBATCH --cpus-per-task=40
6
+ #SBATCH --mem=256G
7
+ #SBATCH -p standard-g
8
+ #SBATCH -t 48:00:00
9
+ #SBATCH --gpus-per-node=mi250:8
10
+ #SBATCH --exclusive=user
11
+ #SBATCH --hint=nomultithread
12
+ #SBATCH --account=project_462000119
13
+ #SBATCH -o logs/%j.out
14
+ #SBATCH -e logs/%j.err
15
+
16
+ VARIANT=4b284b84b50c4pyseed3
17
+
18
+ # if run without sbatch, invoke here
19
+ if [ -z $SLURM_JOB_ID ]; then
20
+ mkdir -p logs
21
+ sbatch "$0"
22
+ exit
23
+ fi
24
+
25
+ set -euo pipefail
26
+
27
+ # symlink logs/latest.out and logs/latest.err
28
+ ln -f -s $SLURM_JOB_ID.out logs/latest.out
29
+ ln -f -s $SLURM_JOB_ID.err logs/latest.err
30
+
31
+ KILL_SWITCH_PATH=kill-switch-$VARIANT
32
+ CHECKPOINT_PATH=checkpoints_$VARIANT
33
+ TENSORBOARD_PATH=tensorboard_$VARIANT
34
+
35
+ # Data
36
+ VOCAB_FILE="gpt2/vocab.json"
37
+ MERGE_FILE="gpt2/merges.txt"
38
+
39
+ TRAIN_DATA_PATH=train84b50c4py.txt
40
+ # "train: 0.5 0:1 /scratch/project_462000119/data/c4_subsampled/gpt2tok_c4_en_84B_text_document, 0.5 0:1 /scratch/project_462000119/data/python/gpt2tok_python_84B_content_document"
41
+ VALID_DATA_PATH=valc4py.txt
42
+ # "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document" "validation_python: 1.0 0.95:1 /scratch/project_462000119/data/python/gpt2tok_python_content_document"
43
+
44
+
45
+ PP_SIZE=1
46
+ TP_SIZE=2
47
+
48
+ MICRO_BATCH_SIZE=2
49
+ GRADIENT_ACCUMULATION_STEPS=1
50
+ WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
51
+ GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))
52
+
53
+ # Model parameters
54
+ source model_params.sh
55
+ MODEL_PARAM=("${PARAM_4516M[@]}")
56
+ NHIDDEN=${MODEL_PARAM[0]}
57
+ FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
58
+ KV_SIZE=${MODEL_PARAM[2]}
59
+ NHEADS=${MODEL_PARAM[3]}
60
+ NLAYERS=${MODEL_PARAM[4]}
61
+ SEQ_LEN=2048
62
+
63
+ echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"
64
+
65
+ SAVE_INTERVAL=10000
66
+
67
+ # Tokens: 84_000_000_000
68
+ # -> Samples: 41_015_625.0
69
+ TRAIN_SAMPLES=41_015_625
70
+
71
+ OPTIMIZER_ARGS=" \
72
+ --optimizer adam \
73
+ --adam-beta1 0.9 \
74
+ --adam-beta2 0.95 \
75
+ --adam-eps 1e-8 \
76
+ --lr 2e-4 \
77
+ --min-lr 2e-5 \
78
+ --lr-decay-style cosine \
79
+ --lr-decay-samples $TRAIN_SAMPLES \
80
+ --lr-warmup-samples 410_156 \
81
+ --clip-grad 1.0 \
82
+ --weight-decay 1e-1 \
83
+ "
84
+
85
+ GPT_ARGS=" \
86
+ --num-layers $NLAYERS \
87
+ --hidden-size $NHIDDEN \
88
+ --num-attention-heads $NHEADS \
89
+ --kv-channels $KV_SIZE \
90
+ --ffn-hidden-size $FFN_HIDDEN_SIZE \
91
+ --seq-length $SEQ_LEN \
92
+ --max-position-embeddings $SEQ_LEN \
93
+ --micro-batch-size $MICRO_BATCH_SIZE \
94
+ --global-batch-size $GLOBAL_BATCH_SIZE \
95
+ --train-samples $TRAIN_SAMPLES \
96
+ --vocab-file $VOCAB_FILE \
97
+ --merge-file $MERGE_FILE \
98
+ --clip-grad 1.0 \
99
+ --kill-switch-path $KILL_SWITCH_PATH \
100
+ --bf16 \
101
+ $OPTIMIZER_ARGS \
102
+ "
103
+
104
+ OUTPUT_ARGS=" \
105
+ --log-interval 10 \
106
+ --save-interval $SAVE_INTERVAL \
107
+ --eval-interval 5000 \
108
+ --eval-iters 10 \
109
+ --tensorboard-dir $TENSORBOARD_PATH \
110
+ --tensorboard-queue-size 5 \
111
+ --log-timers-to-tensorboard \
112
+ --log-batch-size-to-tensorboard \
113
+ --log-validation-ppl-to-tensorboard \
114
+ "
115
+
116
+ ZERO_STAGE=0
117
+
118
+ mkdir -p ds_configs
119
+ DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"
120
+
121
+ cat <<EOF > $DS_CONFIG_PATH
122
+ {
123
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
124
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
125
+ "gradient_clipping": 1.0,
126
+ "zero_optimization": {
127
+ "stage": $ZERO_STAGE
128
+ },
129
+ "bf16": {
130
+ "enabled": true
131
+ },
132
+ "steps_per_print": 2000,
133
+ "wall_clock_breakdown": false
134
+ }
135
+ EOF
136
+
137
+ DEEPSPEED_ARGS=" \
138
+ --deepspeed \
139
+ --deepspeed_config $DS_CONFIG_PATH \
140
+ --zero-stage $ZERO_STAGE \
141
+ "
142
+
143
+ CMD=" \
144
+ Megatron-DeepSpeed/pretrain_gpt.py \
145
+ --tensor-model-parallel-size $TP_SIZE \
146
+ --pipeline-model-parallel-size $PP_SIZE \
147
+ $GPT_ARGS \
148
+ $OUTPUT_ARGS \
149
+ --save $CHECKPOINT_PATH \
150
+ --load $CHECKPOINT_PATH \
151
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
152
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
153
+ --data-impl mmap \
154
+ $DEEPSPEED_ARGS \
155
+ --seed 3 \
156
+ "
157
+
158
+ echo $CMD
159
+
160
+ echo "START $SLURM_JOBID: $(date)"
161
+
162
+ # bash launch_srun.sh $CMD
163
+ srun --label launch.sh $CMD
164
+
165
+ echo "END $SLURM_JOBID: $(date)"
sbatch_4b284b84b50c4pyseed4.sh ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ #SBATCH --exclude=nid007571,nid007112,nid006774,nid007502,nid007506,nid007507,nid005145,nid006692,nid007218,nid007123,nid006124,nid006123,nid007496,nid007237,nid006852,nid007206,nid006947,nid007212,nid006977,nid007222,nid005444,nid007219,nid007493,nid007221,nid005300,nid005619,nid006118,nid005203,nid006113,nid006481,nid007077,nid005208,nid005207,nid005879,nid005901
3
+ #SBATCH --nodes=32
4
+ #SBATCH --ntasks-per-node=1
5
+ #SBATCH --cpus-per-task=40
6
+ #SBATCH --mem=256G
7
+ #SBATCH -p standard-g
8
+ #SBATCH -t 48:00:00
9
+ #SBATCH --gpus-per-node=mi250:8
10
+ #SBATCH --exclusive=user
11
+ #SBATCH --hint=nomultithread
12
+ #SBATCH --account=project_462000119
13
+ #SBATCH -o logs/%j.out
14
+ #SBATCH -e logs/%j.err
15
+
16
+ VARIANT=4b284b84b50c4pyseed4
17
+
18
+ # if run without sbatch, invoke here
19
+ if [ -z $SLURM_JOB_ID ]; then
20
+ mkdir -p logs
21
+ sbatch "$0"
22
+ exit
23
+ fi
24
+
25
+ set -euo pipefail
26
+
27
+ # symlink logs/latest.out and logs/latest.err
28
+ ln -f -s $SLURM_JOB_ID.out logs/latest.out
29
+ ln -f -s $SLURM_JOB_ID.err logs/latest.err
30
+
31
+ KILL_SWITCH_PATH=kill-switch-$VARIANT
32
+ CHECKPOINT_PATH=checkpoints_$VARIANT
33
+ TENSORBOARD_PATH=tensorboard_$VARIANT
34
+
35
+ # Data
36
+ VOCAB_FILE="gpt2/vocab.json"
37
+ MERGE_FILE="gpt2/merges.txt"
38
+
39
+ TRAIN_DATA_PATH=train84b50c4py.txt
40
+ # "train: 0.5 0:1 /scratch/project_462000119/data/c4_subsampled/gpt2tok_c4_en_84B_text_document, 0.5 0:1 /scratch/project_462000119/data/python/gpt2tok_python_84B_content_document"
41
+ VALID_DATA_PATH=valc4py.txt
42
+ # "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document" "validation_python: 1.0 0.95:1 /scratch/project_462000119/data/python/gpt2tok_python_content_document"
43
+
44
+
45
+ PP_SIZE=1
46
+ TP_SIZE=2
47
+
48
+ MICRO_BATCH_SIZE=2
49
+ GRADIENT_ACCUMULATION_STEPS=1
50
+ WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
51
+ GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))
52
+
53
+ # Model parameters
54
+ source model_params.sh
55
+ MODEL_PARAM=("${PARAM_4516M[@]}")
56
+ NHIDDEN=${MODEL_PARAM[0]}
57
+ FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
58
+ KV_SIZE=${MODEL_PARAM[2]}
59
+ NHEADS=${MODEL_PARAM[3]}
60
+ NLAYERS=${MODEL_PARAM[4]}
61
+ SEQ_LEN=2048
62
+
63
+ echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"
64
+
65
+ SAVE_INTERVAL=10000
66
+
67
+ # Tokens: 84_000_000_000
68
+ # -> Samples: 41_015_625.0
69
+ TRAIN_SAMPLES=41_015_625
70
+
71
+ OPTIMIZER_ARGS=" \
72
+ --optimizer adam \
73
+ --adam-beta1 0.9 \
74
+ --adam-beta2 0.95 \
75
+ --adam-eps 1e-8 \
76
+ --lr 2e-4 \
77
+ --min-lr 2e-5 \
78
+ --lr-decay-style cosine \
79
+ --lr-decay-samples $TRAIN_SAMPLES \
80
+ --lr-warmup-samples 410_156 \
81
+ --clip-grad 1.0 \
82
+ --weight-decay 1e-1 \
83
+ "
84
+
85
+ GPT_ARGS=" \
86
+ --num-layers $NLAYERS \
87
+ --hidden-size $NHIDDEN \
88
+ --num-attention-heads $NHEADS \
89
+ --kv-channels $KV_SIZE \
90
+ --ffn-hidden-size $FFN_HIDDEN_SIZE \
91
+ --seq-length $SEQ_LEN \
92
+ --max-position-embeddings $SEQ_LEN \
93
+ --micro-batch-size $MICRO_BATCH_SIZE \
94
+ --global-batch-size $GLOBAL_BATCH_SIZE \
95
+ --train-samples $TRAIN_SAMPLES \
96
+ --vocab-file $VOCAB_FILE \
97
+ --merge-file $MERGE_FILE \
98
+ --clip-grad 1.0 \
99
+ --kill-switch-path $KILL_SWITCH_PATH \
100
+ --bf16 \
101
+ $OPTIMIZER_ARGS \
102
+ "
103
+
104
+ OUTPUT_ARGS=" \
105
+ --log-interval 10 \
106
+ --save-interval $SAVE_INTERVAL \
107
+ --eval-interval 5000 \
108
+ --eval-iters 10 \
109
+ --tensorboard-dir $TENSORBOARD_PATH \
110
+ --tensorboard-queue-size 5 \
111
+ --log-timers-to-tensorboard \
112
+ --log-batch-size-to-tensorboard \
113
+ --log-validation-ppl-to-tensorboard \
114
+ "
115
+
116
+ ZERO_STAGE=0
117
+
118
+ mkdir -p ds_configs
119
+ DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"
120
+
121
+ cat <<EOF > $DS_CONFIG_PATH
122
+ {
123
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
124
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
125
+ "gradient_clipping": 1.0,
126
+ "zero_optimization": {
127
+ "stage": $ZERO_STAGE
128
+ },
129
+ "bf16": {
130
+ "enabled": true
131
+ },
132
+ "steps_per_print": 2000,
133
+ "wall_clock_breakdown": false
134
+ }
135
+ EOF
136
+
137
+ DEEPSPEED_ARGS=" \
138
+ --deepspeed \
139
+ --deepspeed_config $DS_CONFIG_PATH \
140
+ --zero-stage $ZERO_STAGE \
141
+ "
142
+
143
+ CMD=" \
144
+ Megatron-DeepSpeed/pretrain_gpt.py \
145
+ --tensor-model-parallel-size $TP_SIZE \
146
+ --pipeline-model-parallel-size $PP_SIZE \
147
+ $GPT_ARGS \
148
+ $OUTPUT_ARGS \
149
+ --save $CHECKPOINT_PATH \
150
+ --load $CHECKPOINT_PATH \
151
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
152
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
153
+ --data-impl mmap \
154
+ $DEEPSPEED_ARGS \
155
+ --seed 4 \
156
+ "
157
+
158
+ echo $CMD
159
+
160
+ echo "START $SLURM_JOBID: $(date)"
161
+
162
+ # bash launch_srun.sh $CMD
163
+ srun --label launch.sh $CMD
164
+
165
+ echo "END $SLURM_JOBID: $(date)"
sbatch_4b284b84b60c4pyseed1.sh ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ #SBATCH --exclude=nid007571,nid007112,nid006774,nid007502,nid007506,nid007507,nid005145,nid006692,nid007218,nid007123,nid006124,nid006123,nid007496,nid007237,nid006852,nid007206,nid006947,nid007212,nid006977,nid007222,nid005444,nid007219,nid007493,nid007221,nid005300,nid005619,nid006118,nid005203,nid006113,nid006481,nid007077,nid005208,nid005207,nid005879,nid005901
3
+ #SBATCH --nodes=32
4
+ #SBATCH --ntasks-per-node=1
5
+ #SBATCH --cpus-per-task=40
6
+ #SBATCH --mem=256G
7
+ #SBATCH -p standard-g
8
+ #SBATCH -t 48:00:00
9
+ #SBATCH --gpus-per-node=mi250:8
10
+ #SBATCH --exclusive=user
11
+ #SBATCH --hint=nomultithread
12
+ #SBATCH --account=project_462000119
13
+ #SBATCH -o logs/%j.out
14
+ #SBATCH -e logs/%j.err
15
+
16
+ VARIANT=4b284b84b60c4pyseed1
17
+
18
+ # if run without sbatch, invoke here
19
+ if [ -z $SLURM_JOB_ID ]; then
20
+ mkdir -p logs
21
+ sbatch "$0"
22
+ exit
23
+ fi
24
+
25
+ set -euo pipefail
26
+
27
+ # symlink logs/latest.out and logs/latest.err
28
+ ln -f -s $SLURM_JOB_ID.out logs/latest.out
29
+ ln -f -s $SLURM_JOB_ID.err logs/latest.err
30
+
31
+ KILL_SWITCH_PATH=kill-switch-$VARIANT
32
+ CHECKPOINT_PATH=checkpoints_$VARIANT
33
+ TENSORBOARD_PATH=tensorboard_$VARIANT
34
+
35
+ # Data
36
+ VOCAB_FILE="gpt2/vocab.json"
37
+ MERGE_FILE="gpt2/merges.txt"
38
+
39
+ TRAIN_DATA_PATH=train84b60c4py.txt
40
+ # "train: 0.6 0:1 /scratch/project_462000119/data/c4_subsampled/gpt2tok_c4_en_84B_text_document, 0.4 0:1 /scratch/project_462000119/data/python/gpt2tok_python_84B_content_document"
41
+ VALID_DATA_PATH=valc4py.txt
42
+ # "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document" "validation_python: 1.0 0.95:1 /scratch/project_462000119/data/python/gpt2tok_python_content_document"
43
+
44
+ PP_SIZE=1
45
+ TP_SIZE=2
46
+
47
+ MICRO_BATCH_SIZE=2
48
+ GRADIENT_ACCUMULATION_STEPS=1
49
+ WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
50
+ GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))
51
+
52
+ # Model parameters
53
+ source model_params.sh
54
+ MODEL_PARAM=("${PARAM_4516M[@]}")
55
+ NHIDDEN=${MODEL_PARAM[0]}
56
+ FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
57
+ KV_SIZE=${MODEL_PARAM[2]}
58
+ NHEADS=${MODEL_PARAM[3]}
59
+ NLAYERS=${MODEL_PARAM[4]}
60
+ SEQ_LEN=2048
61
+
62
+ echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"
63
+
64
+ SAVE_INTERVAL=10000
65
+
66
+ # Tokens: 84_000_000_000
67
+ # -> Samples: 41_015_625.0
68
+ TRAIN_SAMPLES=41_015_625
69
+
70
+ OPTIMIZER_ARGS=" \
71
+ --optimizer adam \
72
+ --adam-beta1 0.9 \
73
+ --adam-beta2 0.95 \
74
+ --adam-eps 1e-8 \
75
+ --lr 2e-4 \
76
+ --min-lr 2e-5 \
77
+ --lr-decay-style cosine \
78
+ --lr-decay-samples $TRAIN_SAMPLES \
79
+ --lr-warmup-samples 410_156 \
80
+ --clip-grad 1.0 \
81
+ --weight-decay 1e-1 \
82
+ "
83
+
84
+ GPT_ARGS=" \
85
+ --num-layers $NLAYERS \
86
+ --hidden-size $NHIDDEN \
87
+ --num-attention-heads $NHEADS \
88
+ --kv-channels $KV_SIZE \
89
+ --ffn-hidden-size $FFN_HIDDEN_SIZE \
90
+ --seq-length $SEQ_LEN \
91
+ --max-position-embeddings $SEQ_LEN \
92
+ --micro-batch-size $MICRO_BATCH_SIZE \
93
+ --global-batch-size $GLOBAL_BATCH_SIZE \
94
+ --train-samples $TRAIN_SAMPLES \
95
+ --vocab-file $VOCAB_FILE \
96
+ --merge-file $MERGE_FILE \
97
+ --clip-grad 1.0 \
98
+ --kill-switch-path $KILL_SWITCH_PATH \
99
+ --bf16 \
100
+ $OPTIMIZER_ARGS \
101
+ "
102
+
103
+ OUTPUT_ARGS=" \
104
+ --log-interval 10 \
105
+ --save-interval $SAVE_INTERVAL \
106
+ --eval-interval 5000 \
107
+ --eval-iters 10 \
108
+ --tensorboard-dir $TENSORBOARD_PATH \
109
+ --tensorboard-queue-size 5 \
110
+ --log-timers-to-tensorboard \
111
+ --log-batch-size-to-tensorboard \
112
+ --log-validation-ppl-to-tensorboard \
113
+ "
114
+
115
+ ZERO_STAGE=0
116
+
117
+ mkdir -p ds_configs
118
+ DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"
119
+
120
+ cat <<EOF > $DS_CONFIG_PATH
121
+ {
122
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
123
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
124
+ "gradient_clipping": 1.0,
125
+ "zero_optimization": {
126
+ "stage": $ZERO_STAGE
127
+ },
128
+ "bf16": {
129
+ "enabled": true
130
+ },
131
+ "steps_per_print": 2000,
132
+ "wall_clock_breakdown": false
133
+ }
134
+ EOF
135
+
136
+ DEEPSPEED_ARGS=" \
137
+ --deepspeed \
138
+ --deepspeed_config $DS_CONFIG_PATH \
139
+ --zero-stage $ZERO_STAGE \
140
+ "
141
+
142
+ CMD=" \
143
+ Megatron-DeepSpeed/pretrain_gpt.py \
144
+ --tensor-model-parallel-size $TP_SIZE \
145
+ --pipeline-model-parallel-size $PP_SIZE \
146
+ $GPT_ARGS \
147
+ $OUTPUT_ARGS \
148
+ --save $CHECKPOINT_PATH \
149
+ --load $CHECKPOINT_PATH \
150
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
151
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
152
+ --data-impl mmap \
153
+ $DEEPSPEED_ARGS \
154
+ --seed 1 \
155
+ "
156
+
157
+ echo $CMD
158
+
159
+ echo "START $SLURM_JOBID: $(date)"
160
+
161
+ # bash launch_srun.sh $CMD
162
+ srun --label launch.sh $CMD
163
+
164
+ echo "END $SLURM_JOBID: $(date)"
sbatch_4b284b84b60c4pyseed2.sh ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ #SBATCH --exclude=nid007571,nid007112,nid006774,nid007502,nid007506,nid007507,nid005145,nid006692,nid007218,nid007123,nid006124,nid006123,nid007496,nid007237,nid006852,nid007206,nid006947,nid007212,nid006977,nid007222,nid005444,nid007219,nid007493,nid007221,nid005300,nid005619,nid006118,nid005203,nid006113,nid006481,nid007077,nid005208,nid005207,nid005879,nid005901
3
+ #SBATCH --nodes=32
4
+ #SBATCH --ntasks-per-node=1
5
+ #SBATCH --cpus-per-task=40
6
+ #SBATCH --mem=256G
7
+ #SBATCH -p standard-g
8
+ #SBATCH -t 48:00:00
9
+ #SBATCH --gpus-per-node=mi250:8
10
+ #SBATCH --exclusive=user
11
+ #SBATCH --hint=nomultithread
12
+ #SBATCH --account=project_462000119
13
+ #SBATCH -o logs/%j.out
14
+ #SBATCH -e logs/%j.err
15
+
16
+ VARIANT=4b284b84b60c4pyseed2
17
+
18
+ # if run without sbatch, invoke here
19
+ if [ -z $SLURM_JOB_ID ]; then
20
+ mkdir -p logs
21
+ sbatch "$0"
22
+ exit
23
+ fi
24
+
25
+ set -euo pipefail
26
+
27
+ # symlink logs/latest.out and logs/latest.err
28
+ ln -f -s $SLURM_JOB_ID.out logs/latest.out
29
+ ln -f -s $SLURM_JOB_ID.err logs/latest.err
30
+
31
+ KILL_SWITCH_PATH=kill-switch-$VARIANT
32
+ CHECKPOINT_PATH=checkpoints_$VARIANT
33
+ TENSORBOARD_PATH=tensorboard_$VARIANT
34
+
35
+ # Data
36
+ VOCAB_FILE="gpt2/vocab.json"
37
+ MERGE_FILE="gpt2/merges.txt"
38
+
39
+ TRAIN_DATA_PATH=train84b60c4py.txt
40
+ # "train: 0.6 0:1 /scratch/project_462000119/data/c4_subsampled/gpt2tok_c4_en_84B_text_document, 0.4 0:1 /scratch/project_462000119/data/python/gpt2tok_python_84B_content_document"
41
+ VALID_DATA_PATH=valc4py.txt
42
+ # "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document" "validation_python: 1.0 0.95:1 /scratch/project_462000119/data/python/gpt2tok_python_content_document"
43
+
44
+ PP_SIZE=1
45
+ TP_SIZE=2
46
+
47
+ MICRO_BATCH_SIZE=2
48
+ GRADIENT_ACCUMULATION_STEPS=1
49
+ WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
50
+ GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))
51
+
52
+ # Model parameters
53
+ source model_params.sh
54
+ MODEL_PARAM=("${PARAM_4516M[@]}")
55
+ NHIDDEN=${MODEL_PARAM[0]}
56
+ FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
57
+ KV_SIZE=${MODEL_PARAM[2]}
58
+ NHEADS=${MODEL_PARAM[3]}
59
+ NLAYERS=${MODEL_PARAM[4]}
60
+ SEQ_LEN=2048
61
+
62
+ echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"
63
+
64
+ SAVE_INTERVAL=10000
65
+
66
+ # Tokens: 84_000_000_000
67
+ # -> Samples: 41_015_625.0
68
+ TRAIN_SAMPLES=41_015_625
69
+
70
+ OPTIMIZER_ARGS=" \
71
+ --optimizer adam \
72
+ --adam-beta1 0.9 \
73
+ --adam-beta2 0.95 \
74
+ --adam-eps 1e-8 \
75
+ --lr 2e-4 \
76
+ --min-lr 2e-5 \
77
+ --lr-decay-style cosine \
78
+ --lr-decay-samples $TRAIN_SAMPLES \
79
+ --lr-warmup-samples 410_156 \
80
+ --clip-grad 1.0 \
81
+ --weight-decay 1e-1 \
82
+ "
83
+
84
+ GPT_ARGS=" \
85
+ --num-layers $NLAYERS \
86
+ --hidden-size $NHIDDEN \
87
+ --num-attention-heads $NHEADS \
88
+ --kv-channels $KV_SIZE \
89
+ --ffn-hidden-size $FFN_HIDDEN_SIZE \
90
+ --seq-length $SEQ_LEN \
91
+ --max-position-embeddings $SEQ_LEN \
92
+ --micro-batch-size $MICRO_BATCH_SIZE \
93
+ --global-batch-size $GLOBAL_BATCH_SIZE \
94
+ --train-samples $TRAIN_SAMPLES \
95
+ --vocab-file $VOCAB_FILE \
96
+ --merge-file $MERGE_FILE \
97
+ --clip-grad 1.0 \
98
+ --kill-switch-path $KILL_SWITCH_PATH \
99
+ --bf16 \
100
+ $OPTIMIZER_ARGS \
101
+ "
102
+
103
+ OUTPUT_ARGS=" \
104
+ --log-interval 10 \
105
+ --save-interval $SAVE_INTERVAL \
106
+ --eval-interval 5000 \
107
+ --eval-iters 10 \
108
+ --tensorboard-dir $TENSORBOARD_PATH \
109
+ --tensorboard-queue-size 5 \
110
+ --log-timers-to-tensorboard \
111
+ --log-batch-size-to-tensorboard \
112
+ --log-validation-ppl-to-tensorboard \
113
+ "
114
+
115
+ ZERO_STAGE=0
116
+
117
+ mkdir -p ds_configs
118
+ DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"
119
+
120
+ cat <<EOF > $DS_CONFIG_PATH
121
+ {
122
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
123
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
124
+ "gradient_clipping": 1.0,
125
+ "zero_optimization": {
126
+ "stage": $ZERO_STAGE
127
+ },
128
+ "bf16": {
129
+ "enabled": true
130
+ },
131
+ "steps_per_print": 2000,
132
+ "wall_clock_breakdown": false
133
+ }
134
+ EOF
135
+
136
+ DEEPSPEED_ARGS=" \
137
+ --deepspeed \
138
+ --deepspeed_config $DS_CONFIG_PATH \
139
+ --zero-stage $ZERO_STAGE \
140
+ "
141
+
142
+ CMD=" \
143
+ Megatron-DeepSpeed/pretrain_gpt.py \
144
+ --tensor-model-parallel-size $TP_SIZE \
145
+ --pipeline-model-parallel-size $PP_SIZE \
146
+ $GPT_ARGS \
147
+ $OUTPUT_ARGS \
148
+ --save $CHECKPOINT_PATH \
149
+ --load $CHECKPOINT_PATH \
150
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
151
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
152
+ --data-impl mmap \
153
+ $DEEPSPEED_ARGS \
154
+ --seed 2 \
155
+ "
156
+
157
+ echo $CMD
158
+
159
+ echo "START $SLURM_JOBID: $(date)"
160
+
161
+ # bash launch_srun.sh $CMD
162
+ srun --label launch.sh $CMD
163
+
164
+ echo "END $SLURM_JOBID: $(date)"
sbatch_4b284b84b60c4pyseed3.sh ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ #SBATCH --exclude=nid007571,nid007112,nid006774,nid007502,nid007506,nid007507,nid005145,nid006692,nid007218,nid007123,nid006124,nid006123,nid007496,nid007237,nid006852,nid007206,nid006947,nid007212,nid006977,nid007222,nid005444,nid007219,nid007493,nid007221,nid005300,nid005619,nid006118,nid005203,nid006113,nid006481,nid007077,nid005208,nid005207,nid005879,nid005901
3
+ #SBATCH --nodes=32
4
+ #SBATCH --ntasks-per-node=1
5
+ #SBATCH --cpus-per-task=40
6
+ #SBATCH --mem=256G
7
+ #SBATCH -p standard-g
8
+ #SBATCH -t 48:00:00
9
+ #SBATCH --gpus-per-node=mi250:8
10
+ #SBATCH --exclusive=user
11
+ #SBATCH --hint=nomultithread
12
+ #SBATCH --account=project_462000119
13
+ #SBATCH -o logs/%j.out
14
+ #SBATCH -e logs/%j.err
15
+
16
+ VARIANT=4b284b84b60c4pyseed3
17
+
18
+ # if run without sbatch, invoke here
19
+ if [ -z $SLURM_JOB_ID ]; then
20
+ mkdir -p logs
21
+ sbatch "$0"
22
+ exit
23
+ fi
24
+
25
+ set -euo pipefail
26
+
27
+ # symlink logs/latest.out and logs/latest.err
28
+ ln -f -s $SLURM_JOB_ID.out logs/latest.out
29
+ ln -f -s $SLURM_JOB_ID.err logs/latest.err
30
+
31
+ KILL_SWITCH_PATH=kill-switch-$VARIANT
32
+ CHECKPOINT_PATH=checkpoints_$VARIANT
33
+ TENSORBOARD_PATH=tensorboard_$VARIANT
34
+
35
+ # Data
36
+ VOCAB_FILE="gpt2/vocab.json"
37
+ MERGE_FILE="gpt2/merges.txt"
38
+
39
+ TRAIN_DATA_PATH=train84b60c4py.txt
40
+ # "train: 0.6 0:1 /scratch/project_462000119/data/c4_subsampled/gpt2tok_c4_en_84B_text_document, 0.4 0:1 /scratch/project_462000119/data/python/gpt2tok_python_84B_content_document"
41
+ VALID_DATA_PATH=valc4py.txt
42
+ # "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document" "validation_python: 1.0 0.95:1 /scratch/project_462000119/data/python/gpt2tok_python_content_document"
43
+
44
+ PP_SIZE=1
45
+ TP_SIZE=2
46
+
47
+ MICRO_BATCH_SIZE=2
48
+ GRADIENT_ACCUMULATION_STEPS=1
49
+ WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
50
+ GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))
51
+
52
+ # Model parameters
53
+ source model_params.sh
54
+ MODEL_PARAM=("${PARAM_4516M[@]}")
55
+ NHIDDEN=${MODEL_PARAM[0]}
56
+ FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
57
+ KV_SIZE=${MODEL_PARAM[2]}
58
+ NHEADS=${MODEL_PARAM[3]}
59
+ NLAYERS=${MODEL_PARAM[4]}
60
+ SEQ_LEN=2048
61
+
62
+ echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"
63
+
64
+ SAVE_INTERVAL=10000
65
+
66
+ # Tokens: 84_000_000_000
67
+ # -> Samples: 41_015_625.0
68
+ TRAIN_SAMPLES=41_015_625
69
+
70
+ OPTIMIZER_ARGS=" \
71
+ --optimizer adam \
72
+ --adam-beta1 0.9 \
73
+ --adam-beta2 0.95 \
74
+ --adam-eps 1e-8 \
75
+ --lr 2e-4 \
76
+ --min-lr 2e-5 \
77
+ --lr-decay-style cosine \
78
+ --lr-decay-samples $TRAIN_SAMPLES \
79
+ --lr-warmup-samples 410_156 \
80
+ --clip-grad 1.0 \
81
+ --weight-decay 1e-1 \
82
+ "
83
+
84
+ GPT_ARGS=" \
85
+ --num-layers $NLAYERS \
86
+ --hidden-size $NHIDDEN \
87
+ --num-attention-heads $NHEADS \
88
+ --kv-channels $KV_SIZE \
89
+ --ffn-hidden-size $FFN_HIDDEN_SIZE \
90
+ --seq-length $SEQ_LEN \
91
+ --max-position-embeddings $SEQ_LEN \
92
+ --micro-batch-size $MICRO_BATCH_SIZE \
93
+ --global-batch-size $GLOBAL_BATCH_SIZE \
94
+ --train-samples $TRAIN_SAMPLES \
95
+ --vocab-file $VOCAB_FILE \
96
+ --merge-file $MERGE_FILE \
97
+ --clip-grad 1.0 \
98
+ --kill-switch-path $KILL_SWITCH_PATH \
99
+ --bf16 \
100
+ $OPTIMIZER_ARGS \
101
+ "
102
+
103
+ OUTPUT_ARGS=" \
104
+ --log-interval 10 \
105
+ --save-interval $SAVE_INTERVAL \
106
+ --eval-interval 5000 \
107
+ --eval-iters 10 \
108
+ --tensorboard-dir $TENSORBOARD_PATH \
109
+ --tensorboard-queue-size 5 \
110
+ --log-timers-to-tensorboard \
111
+ --log-batch-size-to-tensorboard \
112
+ --log-validation-ppl-to-tensorboard \
113
+ "
114
+
115
+ ZERO_STAGE=0
116
+
117
+ mkdir -p ds_configs
118
+ DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"
119
+
120
+ cat <<EOF > $DS_CONFIG_PATH
121
+ {
122
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
123
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
124
+ "gradient_clipping": 1.0,
125
+ "zero_optimization": {
126
+ "stage": $ZERO_STAGE
127
+ },
128
+ "bf16": {
129
+ "enabled": true
130
+ },
131
+ "steps_per_print": 2000,
132
+ "wall_clock_breakdown": false
133
+ }
134
+ EOF
135
+
136
+ DEEPSPEED_ARGS=" \
137
+ --deepspeed \
138
+ --deepspeed_config $DS_CONFIG_PATH \
139
+ --zero-stage $ZERO_STAGE \
140
+ "
141
+
142
+ CMD=" \
143
+ Megatron-DeepSpeed/pretrain_gpt.py \
144
+ --tensor-model-parallel-size $TP_SIZE \
145
+ --pipeline-model-parallel-size $PP_SIZE \
146
+ $GPT_ARGS \
147
+ $OUTPUT_ARGS \
148
+ --save $CHECKPOINT_PATH \
149
+ --load $CHECKPOINT_PATH \
150
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
151
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
152
+ --data-impl mmap \
153
+ $DEEPSPEED_ARGS \
154
+ --seed 3 \
155
+ "
156
+
157
+ echo $CMD
158
+
159
+ echo "START $SLURM_JOBID: $(date)"
160
+
161
+ # bash launch_srun.sh $CMD
162
+ srun --label launch.sh $CMD
163
+
164
+ echo "END $SLURM_JOBID: $(date)"
sbatch_4b284b84b60c4pyseed4.sh ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ #SBATCH --exclude=nid007571,nid007112,nid006774,nid007502,nid007506,nid007507,nid005145,nid006692,nid007218,nid007123,nid006124,nid006123,nid007496,nid007237,nid006852,nid007206,nid006947,nid007212,nid006977,nid007222,nid005444,nid007219,nid007493,nid007221,nid005300,nid005619,nid006118,nid005203,nid006113,nid006481,nid007077,nid005208,nid005207,nid005879,nid005901
3
+ #SBATCH --nodes=32
4
+ #SBATCH --ntasks-per-node=1
5
+ #SBATCH --cpus-per-task=40
6
+ #SBATCH --mem=256G
7
+ #SBATCH -p standard-g
8
+ #SBATCH -t 48:00:00
9
+ #SBATCH --gpus-per-node=mi250:8
10
+ #SBATCH --exclusive=user
11
+ #SBATCH --hint=nomultithread
12
+ #SBATCH --account=project_462000119
13
+ #SBATCH -o logs/%j.out
14
+ #SBATCH -e logs/%j.err
15
+
16
+ VARIANT=4b284b84b60c4pyseed4
17
+
18
+ # if run without sbatch, invoke here
19
+ if [ -z $SLURM_JOB_ID ]; then
20
+ mkdir -p logs
21
+ sbatch "$0"
22
+ exit
23
+ fi
24
+
25
+ set -euo pipefail
26
+
27
+ # symlink logs/latest.out and logs/latest.err
28
+ ln -f -s $SLURM_JOB_ID.out logs/latest.out
29
+ ln -f -s $SLURM_JOB_ID.err logs/latest.err
30
+
31
+ KILL_SWITCH_PATH=kill-switch-$VARIANT
32
+ CHECKPOINT_PATH=checkpoints_$VARIANT
33
+ TENSORBOARD_PATH=tensorboard_$VARIANT
34
+
35
+ # Data
36
+ VOCAB_FILE="gpt2/vocab.json"
37
+ MERGE_FILE="gpt2/merges.txt"
38
+
39
+ TRAIN_DATA_PATH=train84b60c4py.txt
40
+ # "train: 0.6 0:1 /scratch/project_462000119/data/c4_subsampled/gpt2tok_c4_en_84B_text_document, 0.4 0:1 /scratch/project_462000119/data/python/gpt2tok_python_84B_content_document"
41
+ VALID_DATA_PATH=valc4py.txt
42
+ # "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document" "validation_python: 1.0 0.95:1 /scratch/project_462000119/data/python/gpt2tok_python_content_document"
43
+
44
+ PP_SIZE=1
45
+ TP_SIZE=2
46
+
47
+ MICRO_BATCH_SIZE=2
48
+ GRADIENT_ACCUMULATION_STEPS=1
49
+ WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
50
+ GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))
51
+
52
+ # Model parameters
53
+ source model_params.sh
54
+ MODEL_PARAM=("${PARAM_4516M[@]}")
55
+ NHIDDEN=${MODEL_PARAM[0]}
56
+ FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
57
+ KV_SIZE=${MODEL_PARAM[2]}
58
+ NHEADS=${MODEL_PARAM[3]}
59
+ NLAYERS=${MODEL_PARAM[4]}
60
+ SEQ_LEN=2048
61
+
62
+ echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"
63
+
64
+ SAVE_INTERVAL=10000
65
+
66
+ # Tokens: 84_000_000_000
67
+ # -> Samples: 41_015_625.0
68
+ TRAIN_SAMPLES=41_015_625
69
+
70
+ OPTIMIZER_ARGS=" \
71
+ --optimizer adam \
72
+ --adam-beta1 0.9 \
73
+ --adam-beta2 0.95 \
74
+ --adam-eps 1e-8 \
75
+ --lr 2e-4 \
76
+ --min-lr 2e-5 \
77
+ --lr-decay-style cosine \
78
+ --lr-decay-samples $TRAIN_SAMPLES \
79
+ --lr-warmup-samples 410_156 \
80
+ --clip-grad 1.0 \
81
+ --weight-decay 1e-1 \
82
+ "
83
+
84
+ GPT_ARGS=" \
85
+ --num-layers $NLAYERS \
86
+ --hidden-size $NHIDDEN \
87
+ --num-attention-heads $NHEADS \
88
+ --kv-channels $KV_SIZE \
89
+ --ffn-hidden-size $FFN_HIDDEN_SIZE \
90
+ --seq-length $SEQ_LEN \
91
+ --max-position-embeddings $SEQ_LEN \
92
+ --micro-batch-size $MICRO_BATCH_SIZE \
93
+ --global-batch-size $GLOBAL_BATCH_SIZE \
94
+ --train-samples $TRAIN_SAMPLES \
95
+ --vocab-file $VOCAB_FILE \
96
+ --merge-file $MERGE_FILE \
97
+ --clip-grad 1.0 \
98
+ --kill-switch-path $KILL_SWITCH_PATH \
99
+ --bf16 \
100
+ $OPTIMIZER_ARGS \
101
+ "
102
+
103
+ OUTPUT_ARGS=" \
104
+ --log-interval 10 \
105
+ --save-interval $SAVE_INTERVAL \
106
+ --eval-interval 5000 \
107
+ --eval-iters 10 \
108
+ --tensorboard-dir $TENSORBOARD_PATH \
109
+ --tensorboard-queue-size 5 \
110
+ --log-timers-to-tensorboard \
111
+ --log-batch-size-to-tensorboard \
112
+ --log-validation-ppl-to-tensorboard \
113
+ "
114
+
115
+ ZERO_STAGE=0
116
+
117
+ mkdir -p ds_configs
118
+ DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"
119
+
120
+ cat <<EOF > $DS_CONFIG_PATH
121
+ {
122
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
123
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
124
+ "gradient_clipping": 1.0,
125
+ "zero_optimization": {
126
+ "stage": $ZERO_STAGE
127
+ },
128
+ "bf16": {
129
+ "enabled": true
130
+ },
131
+ "steps_per_print": 2000,
132
+ "wall_clock_breakdown": false
133
+ }
134
+ EOF
135
+
136
+ DEEPSPEED_ARGS=" \
137
+ --deepspeed \
138
+ --deepspeed_config $DS_CONFIG_PATH \
139
+ --zero-stage $ZERO_STAGE \
140
+ "
141
+
142
+ CMD=" \
143
+ Megatron-DeepSpeed/pretrain_gpt.py \
144
+ --tensor-model-parallel-size $TP_SIZE \
145
+ --pipeline-model-parallel-size $PP_SIZE \
146
+ $GPT_ARGS \
147
+ $OUTPUT_ARGS \
148
+ --save $CHECKPOINT_PATH \
149
+ --load $CHECKPOINT_PATH \
150
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
151
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
152
+ --data-impl mmap \
153
+ $DEEPSPEED_ARGS \
154
+ --seed 4 \
155
+ "
156
+
157
+ echo $CMD
158
+
159
+ echo "START $SLURM_JOBID: $(date)"
160
+
161
+ # bash launch_srun.sh $CMD
162
+ srun --label launch.sh $CMD
163
+
164
+ echo "END $SLURM_JOBID: $(date)"
sbatch_4b284b84b70c4pyseed1.sh ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ #SBATCH --exclude=nid007571,nid007112,nid006774,nid007502,nid007506,nid007507,nid005145,nid006692,nid007218,nid007123,nid006124,nid006123,nid007496,nid007237,nid006852,nid007206,nid006947,nid007212,nid006977,nid007222,nid005444,nid007219,nid007493,nid007221,nid005300,nid005619,nid006118,nid005203,nid006113,nid006481,nid007077,nid005208,nid005207,nid005879,nid005901
3
+ #SBATCH --nodes=32
4
+ #SBATCH --ntasks-per-node=1
5
+ #SBATCH --cpus-per-task=40
6
+ #SBATCH --mem=256G
7
+ #SBATCH -p standard-g
8
+ #SBATCH -t 48:00:00
9
+ #SBATCH --gpus-per-node=mi250:8
10
+ #SBATCH --exclusive=user
11
+ #SBATCH --hint=nomultithread
12
+ #SBATCH --account=project_462000119
13
+ #SBATCH -o logs/%j.out
14
+ #SBATCH -e logs/%j.err
15
+
16
+ VARIANT=4b284b84b70c4pyseed1
17
+
18
+ # if run without sbatch, invoke here
19
+ if [ -z $SLURM_JOB_ID ]; then
20
+ mkdir -p logs
21
+ sbatch "$0"
22
+ exit
23
+ fi
24
+
25
+ set -euo pipefail
26
+
27
+ # symlink logs/latest.out and logs/latest.err
28
+ ln -f -s $SLURM_JOB_ID.out logs/latest.out
29
+ ln -f -s $SLURM_JOB_ID.err logs/latest.err
30
+
31
+ KILL_SWITCH_PATH=kill-switch-$VARIANT
32
+ CHECKPOINT_PATH=checkpoints_$VARIANT
33
+ TENSORBOARD_PATH=tensorboard_$VARIANT
34
+
35
+ # Data
36
+ VOCAB_FILE="gpt2/vocab.json"
37
+ MERGE_FILE="gpt2/merges.txt"
38
+
39
+ TRAIN_DATA_PATH=train84b70c4py.txt
40
+ # "train: 0.7 0:1 /scratch/project_462000119/data/c4_subsampled/gpt2tok_c4_en_84B_text_document, 0.3 0:1 /scratch/project_462000119/data/python/gpt2tok_python_84B_content_document"
41
+ VALID_DATA_PATH=valc4py.txt
42
+ # "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document" "validation_python: 1.0 0.95:1 /scratch/project_462000119/data/python/gpt2tok_python_content_document"
43
+
44
+ PP_SIZE=1
45
+ TP_SIZE=2
46
+
47
+ MICRO_BATCH_SIZE=2
48
+ GRADIENT_ACCUMULATION_STEPS=1
49
+ WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
50
+ GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))
51
+
52
+ # Model parameters
53
+ source model_params.sh
54
+ MODEL_PARAM=("${PARAM_4516M[@]}")
55
+ NHIDDEN=${MODEL_PARAM[0]}
56
+ FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
57
+ KV_SIZE=${MODEL_PARAM[2]}
58
+ NHEADS=${MODEL_PARAM[3]}
59
+ NLAYERS=${MODEL_PARAM[4]}
60
+ SEQ_LEN=2048
61
+
62
+ echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"
63
+
64
+ SAVE_INTERVAL=10000
65
+
66
+ # Tokens: 84_000_000_000
67
+ # -> Samples: 41_015_625.0
68
+ TRAIN_SAMPLES=41_015_625
69
+
70
+ OPTIMIZER_ARGS=" \
71
+ --optimizer adam \
72
+ --adam-beta1 0.9 \
73
+ --adam-beta2 0.95 \
74
+ --adam-eps 1e-8 \
75
+ --lr 2e-4 \
76
+ --min-lr 2e-5 \
77
+ --lr-decay-style cosine \
78
+ --lr-decay-samples $TRAIN_SAMPLES \
79
+ --lr-warmup-samples 410_156 \
80
+ --clip-grad 1.0 \
81
+ --weight-decay 1e-1 \
82
+ "
83
+
84
+ GPT_ARGS=" \
85
+ --num-layers $NLAYERS \
86
+ --hidden-size $NHIDDEN \
87
+ --num-attention-heads $NHEADS \
88
+ --kv-channels $KV_SIZE \
89
+ --ffn-hidden-size $FFN_HIDDEN_SIZE \
90
+ --seq-length $SEQ_LEN \
91
+ --max-position-embeddings $SEQ_LEN \
92
+ --micro-batch-size $MICRO_BATCH_SIZE \
93
+ --global-batch-size $GLOBAL_BATCH_SIZE \
94
+ --train-samples $TRAIN_SAMPLES \
95
+ --vocab-file $VOCAB_FILE \
96
+ --merge-file $MERGE_FILE \
97
+ --clip-grad 1.0 \
98
+ --kill-switch-path $KILL_SWITCH_PATH \
99
+ --bf16 \
100
+ $OPTIMIZER_ARGS \
101
+ "
102
+
103
+ OUTPUT_ARGS=" \
104
+ --log-interval 10 \
105
+ --save-interval $SAVE_INTERVAL \
106
+ --eval-interval 5000 \
107
+ --eval-iters 10 \
108
+ --tensorboard-dir $TENSORBOARD_PATH \
109
+ --tensorboard-queue-size 5 \
110
+ --log-timers-to-tensorboard \
111
+ --log-batch-size-to-tensorboard \
112
+ --log-validation-ppl-to-tensorboard \
113
+ "
114
+
115
+ ZERO_STAGE=0
116
+
117
+ mkdir -p ds_configs
118
+ DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"
119
+
120
+ cat <<EOF > $DS_CONFIG_PATH
121
+ {
122
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
123
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
124
+ "gradient_clipping": 1.0,
125
+ "zero_optimization": {
126
+ "stage": $ZERO_STAGE
127
+ },
128
+ "bf16": {
129
+ "enabled": true
130
+ },
131
+ "steps_per_print": 2000,
132
+ "wall_clock_breakdown": false
133
+ }
134
+ EOF
135
+
136
+ DEEPSPEED_ARGS=" \
137
+ --deepspeed \
138
+ --deepspeed_config $DS_CONFIG_PATH \
139
+ --zero-stage $ZERO_STAGE \
140
+ "
141
+
142
+ CMD=" \
143
+ Megatron-DeepSpeed/pretrain_gpt.py \
144
+ --tensor-model-parallel-size $TP_SIZE \
145
+ --pipeline-model-parallel-size $PP_SIZE \
146
+ $GPT_ARGS \
147
+ $OUTPUT_ARGS \
148
+ --save $CHECKPOINT_PATH \
149
+ --load $CHECKPOINT_PATH \
150
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
151
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
152
+ --data-impl mmap \
153
+ $DEEPSPEED_ARGS \
154
+ --seed 1 \
155
+ "
156
+
157
+ echo $CMD
158
+
159
+ echo "START $SLURM_JOBID: $(date)"
160
+
161
+ # bash launch_srun.sh $CMD
162
+ srun --label launch.sh $CMD
163
+
164
+ echo "END $SLURM_JOBID: $(date)"
sbatch_4b284b84b70c4pyseed2.sh ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ #SBATCH --exclude=nid007571,nid007112,nid006774,nid007502,nid007506,nid007507,nid005145,nid006692,nid007218,nid007123,nid006124,nid006123,nid007496,nid007237,nid006852,nid007206,nid006947,nid007212,nid006977,nid007222,nid005444,nid007219,nid007493,nid007221,nid005300,nid005619,nid006118,nid005203,nid006113,nid006481,nid007077,nid005208,nid005207,nid005879,nid005901
3
+ #SBATCH --nodes=32
4
+ #SBATCH --ntasks-per-node=1
5
+ #SBATCH --cpus-per-task=40
6
+ #SBATCH --mem=256G
7
+ #SBATCH -p standard-g
8
+ #SBATCH -t 48:00:00
9
+ #SBATCH --gpus-per-node=mi250:8
10
+ #SBATCH --exclusive=user
11
+ #SBATCH --hint=nomultithread
12
+ #SBATCH --account=project_462000119
13
+ #SBATCH -o logs/%j.out
14
+ #SBATCH -e logs/%j.err
15
+
16
+ VARIANT=4b284b84b70c4pyseed2
17
+
18
+ # if run without sbatch, invoke here
19
+ if [ -z $SLURM_JOB_ID ]; then
20
+ mkdir -p logs
21
+ sbatch "$0"
22
+ exit
23
+ fi
24
+
25
+ set -euo pipefail
26
+
27
+ # symlink logs/latest.out and logs/latest.err
28
+ ln -f -s $SLURM_JOB_ID.out logs/latest.out
29
+ ln -f -s $SLURM_JOB_ID.err logs/latest.err
30
+
31
+ KILL_SWITCH_PATH=kill-switch-$VARIANT
32
+ CHECKPOINT_PATH=checkpoints_$VARIANT
33
+ TENSORBOARD_PATH=tensorboard_$VARIANT
34
+
35
+ # Data
36
+ VOCAB_FILE="gpt2/vocab.json"
37
+ MERGE_FILE="gpt2/merges.txt"
38
+
39
+ TRAIN_DATA_PATH=train84b70c4py.txt
40
+ # "train: 0.7 0:1 /scratch/project_462000119/data/c4_subsampled/gpt2tok_c4_en_84B_text_document, 0.3 0:1 /scratch/project_462000119/data/python/gpt2tok_python_84B_content_document"
41
+ VALID_DATA_PATH=valc4py.txt
42
+ # "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document" "validation_python: 1.0 0.95:1 /scratch/project_462000119/data/python/gpt2tok_python_content_document"
43
+
44
+ PP_SIZE=1
45
+ TP_SIZE=2
46
+
47
+ MICRO_BATCH_SIZE=2
48
+ GRADIENT_ACCUMULATION_STEPS=1
49
+ WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
50
+ GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))
51
+
52
+ # Model parameters
53
+ source model_params.sh
54
+ MODEL_PARAM=("${PARAM_4516M[@]}")
55
+ NHIDDEN=${MODEL_PARAM[0]}
56
+ FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
57
+ KV_SIZE=${MODEL_PARAM[2]}
58
+ NHEADS=${MODEL_PARAM[3]}
59
+ NLAYERS=${MODEL_PARAM[4]}
60
+ SEQ_LEN=2048
61
+
62
+ echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"
63
+
64
+ SAVE_INTERVAL=10000
65
+
66
+ # Tokens: 84_000_000_000
67
+ # -> Samples: 41_015_625.0
68
+ TRAIN_SAMPLES=41_015_625
69
+
70
+ OPTIMIZER_ARGS=" \
71
+ --optimizer adam \
72
+ --adam-beta1 0.9 \
73
+ --adam-beta2 0.95 \
74
+ --adam-eps 1e-8 \
75
+ --lr 2e-4 \
76
+ --min-lr 2e-5 \
77
+ --lr-decay-style cosine \
78
+ --lr-decay-samples $TRAIN_SAMPLES \
79
+ --lr-warmup-samples 410_156 \
80
+ --clip-grad 1.0 \
81
+ --weight-decay 1e-1 \
82
+ "
83
+
84
+ GPT_ARGS=" \
85
+ --num-layers $NLAYERS \
86
+ --hidden-size $NHIDDEN \
87
+ --num-attention-heads $NHEADS \
88
+ --kv-channels $KV_SIZE \
89
+ --ffn-hidden-size $FFN_HIDDEN_SIZE \
90
+ --seq-length $SEQ_LEN \
91
+ --max-position-embeddings $SEQ_LEN \
92
+ --micro-batch-size $MICRO_BATCH_SIZE \
93
+ --global-batch-size $GLOBAL_BATCH_SIZE \
94
+ --train-samples $TRAIN_SAMPLES \
95
+ --vocab-file $VOCAB_FILE \
96
+ --merge-file $MERGE_FILE \
97
+ --clip-grad 1.0 \
98
+ --kill-switch-path $KILL_SWITCH_PATH \
99
+ --bf16 \
100
+ $OPTIMIZER_ARGS \
101
+ "
102
+
103
+ OUTPUT_ARGS=" \
104
+ --log-interval 10 \
105
+ --save-interval $SAVE_INTERVAL \
106
+ --eval-interval 5000 \
107
+ --eval-iters 10 \
108
+ --tensorboard-dir $TENSORBOARD_PATH \
109
+ --tensorboard-queue-size 5 \
110
+ --log-timers-to-tensorboard \
111
+ --log-batch-size-to-tensorboard \
112
+ --log-validation-ppl-to-tensorboard \
113
+ "
114
+
115
+ ZERO_STAGE=0
116
+
117
+ mkdir -p ds_configs
118
+ DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"
119
+
120
+ cat <<EOF > $DS_CONFIG_PATH
121
+ {
122
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
123
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
124
+ "gradient_clipping": 1.0,
125
+ "zero_optimization": {
126
+ "stage": $ZERO_STAGE
127
+ },
128
+ "bf16": {
129
+ "enabled": true
130
+ },
131
+ "steps_per_print": 2000,
132
+ "wall_clock_breakdown": false
133
+ }
134
+ EOF
135
+
136
+ DEEPSPEED_ARGS=" \
137
+ --deepspeed \
138
+ --deepspeed_config $DS_CONFIG_PATH \
139
+ --zero-stage $ZERO_STAGE \
140
+ "
141
+
142
+ CMD=" \
143
+ Megatron-DeepSpeed/pretrain_gpt.py \
144
+ --tensor-model-parallel-size $TP_SIZE \
145
+ --pipeline-model-parallel-size $PP_SIZE \
146
+ $GPT_ARGS \
147
+ $OUTPUT_ARGS \
148
+ --save $CHECKPOINT_PATH \
149
+ --load $CHECKPOINT_PATH \
150
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
151
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
152
+ --data-impl mmap \
153
+ $DEEPSPEED_ARGS \
154
+ --seed 2 \
155
+ "
156
+
157
+ echo $CMD
158
+
159
+ echo "START $SLURM_JOBID: $(date)"
160
+
161
+ # bash launch_srun.sh $CMD
162
+ srun --label launch.sh $CMD
163
+
164
+ echo "END $SLURM_JOBID: $(date)"
sbatch_4b284b84b70c4pyseed3.sh ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ #SBATCH --exclude=nid007571,nid007112,nid006774,nid007502,nid007506,nid007507,nid005145,nid006692,nid007218,nid007123,nid006124,nid006123,nid007496,nid007237,nid006852,nid007206,nid006947,nid007212,nid006977,nid007222,nid005444,nid007219,nid007493,nid007221,nid005300,nid005619,nid006118,nid005203,nid006113,nid006481,nid007077,nid005208,nid005207,nid005879,nid005901
3
+ #SBATCH --nodes=32
4
+ #SBATCH --ntasks-per-node=1
5
+ #SBATCH --cpus-per-task=40
6
+ #SBATCH --mem=256G
7
+ #SBATCH -p standard-g
8
+ #SBATCH -t 48:00:00
9
+ #SBATCH --gpus-per-node=mi250:8
10
+ #SBATCH --exclusive=user
11
+ #SBATCH --hint=nomultithread
12
+ #SBATCH --account=project_462000119
13
+ #SBATCH -o logs/%j.out
14
+ #SBATCH -e logs/%j.err
15
+
16
+ VARIANT=4b284b84b70c4pyseed3
17
+
18
+ # if run without sbatch, invoke here
19
+ if [ -z $SLURM_JOB_ID ]; then
20
+ mkdir -p logs
21
+ sbatch "$0"
22
+ exit
23
+ fi
24
+
25
+ set -euo pipefail
26
+
27
+ # symlink logs/latest.out and logs/latest.err
28
+ ln -f -s $SLURM_JOB_ID.out logs/latest.out
29
+ ln -f -s $SLURM_JOB_ID.err logs/latest.err
30
+
31
+ KILL_SWITCH_PATH=kill-switch-$VARIANT
32
+ CHECKPOINT_PATH=checkpoints_$VARIANT
33
+ TENSORBOARD_PATH=tensorboard_$VARIANT
34
+
35
+ # Data
36
+ VOCAB_FILE="gpt2/vocab.json"
37
+ MERGE_FILE="gpt2/merges.txt"
38
+
39
+ TRAIN_DATA_PATH=train84b70c4py.txt
40
+ # "train: 0.7 0:1 /scratch/project_462000119/data/c4_subsampled/gpt2tok_c4_en_84B_text_document, 0.3 0:1 /scratch/project_462000119/data/python/gpt2tok_python_84B_content_document"
41
+ VALID_DATA_PATH=valc4py.txt
42
+ # "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document" "validation_python: 1.0 0.95:1 /scratch/project_462000119/data/python/gpt2tok_python_content_document"
43
+
44
+ PP_SIZE=1
45
+ TP_SIZE=2
46
+
47
+ MICRO_BATCH_SIZE=2
48
+ GRADIENT_ACCUMULATION_STEPS=1
49
+ WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
50
+ GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))
51
+
52
+ # Model parameters
53
+ source model_params.sh
54
+ MODEL_PARAM=("${PARAM_4516M[@]}")
55
+ NHIDDEN=${MODEL_PARAM[0]}
56
+ FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
57
+ KV_SIZE=${MODEL_PARAM[2]}
58
+ NHEADS=${MODEL_PARAM[3]}
59
+ NLAYERS=${MODEL_PARAM[4]}
60
+ SEQ_LEN=2048
61
+
62
+ echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"
63
+
64
+ SAVE_INTERVAL=10000
65
+
66
+ # Tokens: 84_000_000_000
67
+ # -> Samples: 41_015_625.0
68
+ TRAIN_SAMPLES=41_015_625
69
+
70
+ OPTIMIZER_ARGS=" \
71
+ --optimizer adam \
72
+ --adam-beta1 0.9 \
73
+ --adam-beta2 0.95 \
74
+ --adam-eps 1e-8 \
75
+ --lr 2e-4 \
76
+ --min-lr 2e-5 \
77
+ --lr-decay-style cosine \
78
+ --lr-decay-samples $TRAIN_SAMPLES \
79
+ --lr-warmup-samples 410_156 \
80
+ --clip-grad 1.0 \
81
+ --weight-decay 1e-1 \
82
+ "
83
+
84
+ GPT_ARGS=" \
85
+ --num-layers $NLAYERS \
86
+ --hidden-size $NHIDDEN \
87
+ --num-attention-heads $NHEADS \
88
+ --kv-channels $KV_SIZE \
89
+ --ffn-hidden-size $FFN_HIDDEN_SIZE \
90
+ --seq-length $SEQ_LEN \
91
+ --max-position-embeddings $SEQ_LEN \
92
+ --micro-batch-size $MICRO_BATCH_SIZE \
93
+ --global-batch-size $GLOBAL_BATCH_SIZE \
94
+ --train-samples $TRAIN_SAMPLES \
95
+ --vocab-file $VOCAB_FILE \
96
+ --merge-file $MERGE_FILE \
97
+ --clip-grad 1.0 \
98
+ --kill-switch-path $KILL_SWITCH_PATH \
99
+ --bf16 \
100
+ $OPTIMIZER_ARGS \
101
+ "
102
+
103
+ OUTPUT_ARGS=" \
104
+ --log-interval 10 \
105
+ --save-interval $SAVE_INTERVAL \
106
+ --eval-interval 5000 \
107
+ --eval-iters 10 \
108
+ --tensorboard-dir $TENSORBOARD_PATH \
109
+ --tensorboard-queue-size 5 \
110
+ --log-timers-to-tensorboard \
111
+ --log-batch-size-to-tensorboard \
112
+ --log-validation-ppl-to-tensorboard \
113
+ "
114
+
115
+ ZERO_STAGE=0
116
+
117
+ mkdir -p ds_configs
118
+ DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"
119
+
120
+ cat <<EOF > $DS_CONFIG_PATH
121
+ {
122
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
123
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
124
+ "gradient_clipping": 1.0,
125
+ "zero_optimization": {
126
+ "stage": $ZERO_STAGE
127
+ },
128
+ "bf16": {
129
+ "enabled": true
130
+ },
131
+ "steps_per_print": 2000,
132
+ "wall_clock_breakdown": false
133
+ }
134
+ EOF
135
+
136
+ DEEPSPEED_ARGS=" \
137
+ --deepspeed \
138
+ --deepspeed_config $DS_CONFIG_PATH \
139
+ --zero-stage $ZERO_STAGE \
140
+ "
141
+
142
+ CMD=" \
143
+ Megatron-DeepSpeed/pretrain_gpt.py \
144
+ --tensor-model-parallel-size $TP_SIZE \
145
+ --pipeline-model-parallel-size $PP_SIZE \
146
+ $GPT_ARGS \
147
+ $OUTPUT_ARGS \
148
+ --save $CHECKPOINT_PATH \
149
+ --load $CHECKPOINT_PATH \
150
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
151
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
152
+ --data-impl mmap \
153
+ $DEEPSPEED_ARGS \
154
+ --seed 3 \
155
+ "
156
+
157
+ echo $CMD
158
+
159
+ echo "START $SLURM_JOBID: $(date)"
160
+
161
+ # bash launch_srun.sh $CMD
162
+ srun --label launch.sh $CMD
163
+
164
+ echo "END $SLURM_JOBID: $(date)"
sbatch_4b284b84b70c4pyseed4.sh ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ #SBATCH --exclude=nid007571,nid007112,nid006774,nid007502,nid007506,nid007507,nid005145,nid006692,nid007218,nid007123,nid006124,nid006123,nid007496,nid007237,nid006852,nid007206,nid006947,nid007212,nid006977,nid007222,nid005444,nid007219,nid007493,nid007221,nid005300,nid005619,nid006118,nid005203,nid006113,nid006481,nid007077,nid005208,nid005207,nid005879,nid005901
3
+ #SBATCH --nodes=32
4
+ #SBATCH --ntasks-per-node=1
5
+ #SBATCH --cpus-per-task=40
6
+ #SBATCH --mem=256G
7
+ #SBATCH -p standard-g
8
+ #SBATCH -t 48:00:00
9
+ #SBATCH --gpus-per-node=mi250:8
10
+ #SBATCH --exclusive=user
11
+ #SBATCH --hint=nomultithread
12
+ #SBATCH --account=project_462000119
13
+ #SBATCH -o logs/%j.out
14
+ #SBATCH -e logs/%j.err
15
+
16
+ VARIANT=4b284b84b70c4pyseed4
17
+
18
+ # if run without sbatch, invoke here
19
+ if [ -z $SLURM_JOB_ID ]; then
20
+ mkdir -p logs
21
+ sbatch "$0"
22
+ exit
23
+ fi
24
+
25
+ set -euo pipefail
26
+
27
+ # symlink logs/latest.out and logs/latest.err
28
+ ln -f -s $SLURM_JOB_ID.out logs/latest.out
29
+ ln -f -s $SLURM_JOB_ID.err logs/latest.err
30
+
31
+ KILL_SWITCH_PATH=kill-switch-$VARIANT
32
+ CHECKPOINT_PATH=checkpoints_$VARIANT
33
+ TENSORBOARD_PATH=tensorboard_$VARIANT
34
+
35
+ # Data
36
+ VOCAB_FILE="gpt2/vocab.json"
37
+ MERGE_FILE="gpt2/merges.txt"
38
+
39
+ TRAIN_DATA_PATH=train84b70c4py.txt
40
+ # "train: 0.7 0:1 /scratch/project_462000119/data/c4_subsampled/gpt2tok_c4_en_84B_text_document, 0.3 0:1 /scratch/project_462000119/data/python/gpt2tok_python_84B_content_document"
41
+ VALID_DATA_PATH=valc4py.txt
42
+ # "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document" "validation_python: 1.0 0.95:1 /scratch/project_462000119/data/python/gpt2tok_python_content_document"
43
+
44
+ PP_SIZE=1
45
+ TP_SIZE=2
46
+
47
+ MICRO_BATCH_SIZE=2
48
+ GRADIENT_ACCUMULATION_STEPS=1
49
+ WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
50
+ GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))
51
+
52
+ # Model parameters
53
+ source model_params.sh
54
+ MODEL_PARAM=("${PARAM_4516M[@]}")
55
+ NHIDDEN=${MODEL_PARAM[0]}
56
+ FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
57
+ KV_SIZE=${MODEL_PARAM[2]}
58
+ NHEADS=${MODEL_PARAM[3]}
59
+ NLAYERS=${MODEL_PARAM[4]}
60
+ SEQ_LEN=2048
61
+
62
+ echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"
63
+
64
+ SAVE_INTERVAL=10000
65
+
66
+ # Tokens: 84_000_000_000
67
+ # -> Samples: 41_015_625.0
68
+ TRAIN_SAMPLES=41_015_625
69
+
70
+ OPTIMIZER_ARGS=" \
71
+ --optimizer adam \
72
+ --adam-beta1 0.9 \
73
+ --adam-beta2 0.95 \
74
+ --adam-eps 1e-8 \
75
+ --lr 2e-4 \
76
+ --min-lr 2e-5 \
77
+ --lr-decay-style cosine \
78
+ --lr-decay-samples $TRAIN_SAMPLES \
79
+ --lr-warmup-samples 410_156 \
80
+ --clip-grad 1.0 \
81
+ --weight-decay 1e-1 \
82
+ "
83
+
84
+ GPT_ARGS=" \
85
+ --num-layers $NLAYERS \
86
+ --hidden-size $NHIDDEN \
87
+ --num-attention-heads $NHEADS \
88
+ --kv-channels $KV_SIZE \
89
+ --ffn-hidden-size $FFN_HIDDEN_SIZE \
90
+ --seq-length $SEQ_LEN \
91
+ --max-position-embeddings $SEQ_LEN \
92
+ --micro-batch-size $MICRO_BATCH_SIZE \
93
+ --global-batch-size $GLOBAL_BATCH_SIZE \
94
+ --train-samples $TRAIN_SAMPLES \
95
+ --vocab-file $VOCAB_FILE \
96
+ --merge-file $MERGE_FILE \
97
+ --clip-grad 1.0 \
98
+ --kill-switch-path $KILL_SWITCH_PATH \
99
+ --bf16 \
100
+ $OPTIMIZER_ARGS \
101
+ "
102
+
103
+ OUTPUT_ARGS=" \
104
+ --log-interval 10 \
105
+ --save-interval $SAVE_INTERVAL \
106
+ --eval-interval 5000 \
107
+ --eval-iters 10 \
108
+ --tensorboard-dir $TENSORBOARD_PATH \
109
+ --tensorboard-queue-size 5 \
110
+ --log-timers-to-tensorboard \
111
+ --log-batch-size-to-tensorboard \
112
+ --log-validation-ppl-to-tensorboard \
113
+ "
114
+
115
+ ZERO_STAGE=0
116
+
117
+ mkdir -p ds_configs
118
+ DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"
119
+
120
+ cat <<EOF > $DS_CONFIG_PATH
121
+ {
122
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
123
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
124
+ "gradient_clipping": 1.0,
125
+ "zero_optimization": {
126
+ "stage": $ZERO_STAGE
127
+ },
128
+ "bf16": {
129
+ "enabled": true
130
+ },
131
+ "steps_per_print": 2000,
132
+ "wall_clock_breakdown": false
133
+ }
134
+ EOF
135
+
136
+ DEEPSPEED_ARGS=" \
137
+ --deepspeed \
138
+ --deepspeed_config $DS_CONFIG_PATH \
139
+ --zero-stage $ZERO_STAGE \
140
+ "
141
+
142
+ CMD=" \
143
+ Megatron-DeepSpeed/pretrain_gpt.py \
144
+ --tensor-model-parallel-size $TP_SIZE \
145
+ --pipeline-model-parallel-size $PP_SIZE \
146
+ $GPT_ARGS \
147
+ $OUTPUT_ARGS \
148
+ --save $CHECKPOINT_PATH \
149
+ --load $CHECKPOINT_PATH \
150
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
151
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
152
+ --data-impl mmap \
153
+ $DEEPSPEED_ARGS \
154
+ --seed 4 \
155
+ "
156
+
157
+ echo $CMD
158
+
159
+ echo "START $SLURM_JOBID: $(date)"
160
+
161
+ # bash launch_srun.sh $CMD
162
+ srun --label launch.sh $CMD
163
+
164
+ echo "END $SLURM_JOBID: $(date)"
sbatch_4b284b84b80c4pyseed1.sh ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ #SBATCH --exclude=nid007571,nid007112,nid006774,nid007502,nid007506,nid007507,nid005145,nid006692,nid007218,nid007123,nid006124,nid006123,nid007496,nid007237,nid006852,nid007206,nid006947,nid007212,nid006977,nid007222,nid005444,nid007219,nid007493,nid007221,nid005300,nid005619,nid006118,nid005203,nid006113,nid006481,nid007077,nid005208,nid005207,nid005879,nid005901
3
+ #SBATCH --nodes=32
4
+ #SBATCH --ntasks-per-node=1
5
+ #SBATCH --cpus-per-task=40
6
+ #SBATCH --mem=256G
7
+ #SBATCH -p standard-g
8
+ #SBATCH -t 48:00:00
9
+ #SBATCH --gpus-per-node=mi250:8
10
+ #SBATCH --exclusive=user
11
+ #SBATCH --hint=nomultithread
12
+ #SBATCH --account=project_462000119
13
+ #SBATCH -o logs/%j.out
14
+ #SBATCH -e logs/%j.err
15
+
16
+ VARIANT=4b284b84b80c4pyseed1
17
+
18
+ # if run without sbatch, invoke here
19
+ if [ -z $SLURM_JOB_ID ]; then
20
+ mkdir -p logs
21
+ sbatch "$0"
22
+ exit
23
+ fi
24
+
25
+ set -euo pipefail
26
+
27
+ # symlink logs/latest.out and logs/latest.err
28
+ ln -f -s $SLURM_JOB_ID.out logs/latest.out
29
+ ln -f -s $SLURM_JOB_ID.err logs/latest.err
30
+
31
+ KILL_SWITCH_PATH=kill-switch-$VARIANT
32
+ CHECKPOINT_PATH=checkpoints_$VARIANT
33
+ TENSORBOARD_PATH=tensorboard_$VARIANT
34
+
35
+ # Data
36
+ VOCAB_FILE="gpt2/vocab.json"
37
+ MERGE_FILE="gpt2/merges.txt"
38
+
39
+ TRAIN_DATA_PATH=train84b80c4py.txt
40
+ # "train: 0.8 0:1 /scratch/project_462000119/data/c4_subsampled/gpt2tok_c4_en_84B_text_document, 0.2 0:1 /scratch/project_462000119/data/python/gpt2tok_python_84B_content_document"
41
+ VALID_DATA_PATH=valc4py.txt
42
+ # "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document" "validation_python: 1.0 0.95:1 /scratch/project_462000119/data/python/gpt2tok_python_content_document"
43
+
44
+ PP_SIZE=1
45
+ TP_SIZE=2
46
+
47
+ MICRO_BATCH_SIZE=2
48
+ GRADIENT_ACCUMULATION_STEPS=1
49
+ WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
50
+ GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))
51
+
52
+ # Model parameters
53
+ source model_params.sh
54
+ MODEL_PARAM=("${PARAM_4516M[@]}")
55
+ NHIDDEN=${MODEL_PARAM[0]}
56
+ FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
57
+ KV_SIZE=${MODEL_PARAM[2]}
58
+ NHEADS=${MODEL_PARAM[3]}
59
+ NLAYERS=${MODEL_PARAM[4]}
60
+ SEQ_LEN=2048
61
+
62
+ echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"
63
+
64
+ SAVE_INTERVAL=10000
65
+
66
+ # Tokens: 84_000_000_000
67
+ # -> Samples: 41_015_625.0
68
+ TRAIN_SAMPLES=41_015_625
69
+
70
+ OPTIMIZER_ARGS=" \
71
+ --optimizer adam \
72
+ --adam-beta1 0.9 \
73
+ --adam-beta2 0.95 \
74
+ --adam-eps 1e-8 \
75
+ --lr 2e-4 \
76
+ --min-lr 2e-5 \
77
+ --lr-decay-style cosine \
78
+ --lr-decay-samples $TRAIN_SAMPLES \
79
+ --lr-warmup-samples 410_156 \
80
+ --clip-grad 1.0 \
81
+ --weight-decay 1e-1 \
82
+ "
83
+
84
+ GPT_ARGS=" \
85
+ --num-layers $NLAYERS \
86
+ --hidden-size $NHIDDEN \
87
+ --num-attention-heads $NHEADS \
88
+ --kv-channels $KV_SIZE \
89
+ --ffn-hidden-size $FFN_HIDDEN_SIZE \
90
+ --seq-length $SEQ_LEN \
91
+ --max-position-embeddings $SEQ_LEN \
92
+ --micro-batch-size $MICRO_BATCH_SIZE \
93
+ --global-batch-size $GLOBAL_BATCH_SIZE \
94
+ --train-samples $TRAIN_SAMPLES \
95
+ --vocab-file $VOCAB_FILE \
96
+ --merge-file $MERGE_FILE \
97
+ --clip-grad 1.0 \
98
+ --kill-switch-path $KILL_SWITCH_PATH \
99
+ --bf16 \
100
+ $OPTIMIZER_ARGS \
101
+ "
102
+
103
+ OUTPUT_ARGS=" \
104
+ --log-interval 10 \
105
+ --save-interval $SAVE_INTERVAL \
106
+ --eval-interval 5000 \
107
+ --eval-iters 10 \
108
+ --tensorboard-dir $TENSORBOARD_PATH \
109
+ --tensorboard-queue-size 5 \
110
+ --log-timers-to-tensorboard \
111
+ --log-batch-size-to-tensorboard \
112
+ --log-validation-ppl-to-tensorboard \
113
+ "
114
+
115
+ ZERO_STAGE=0
116
+
117
+ mkdir -p ds_configs
118
+ DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"
119
+
120
+ cat <<EOF > $DS_CONFIG_PATH
121
+ {
122
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
123
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
124
+ "gradient_clipping": 1.0,
125
+ "zero_optimization": {
126
+ "stage": $ZERO_STAGE
127
+ },
128
+ "bf16": {
129
+ "enabled": true
130
+ },
131
+ "steps_per_print": 2000,
132
+ "wall_clock_breakdown": false
133
+ }
134
+ EOF
135
+
136
+ DEEPSPEED_ARGS=" \
137
+ --deepspeed \
138
+ --deepspeed_config $DS_CONFIG_PATH \
139
+ --zero-stage $ZERO_STAGE \
140
+ "
141
+
142
+ CMD=" \
143
+ Megatron-DeepSpeed/pretrain_gpt.py \
144
+ --tensor-model-parallel-size $TP_SIZE \
145
+ --pipeline-model-parallel-size $PP_SIZE \
146
+ $GPT_ARGS \
147
+ $OUTPUT_ARGS \
148
+ --save $CHECKPOINT_PATH \
149
+ --load $CHECKPOINT_PATH \
150
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
151
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
152
+ --data-impl mmap \
153
+ $DEEPSPEED_ARGS \
154
+ --seed 1 \
155
+ "
156
+
157
+ echo $CMD
158
+
159
+ echo "START $SLURM_JOBID: $(date)"
160
+
161
+ # bash launch_srun.sh $CMD
162
+ srun --label launch.sh $CMD
163
+
164
+ echo "END $SLURM_JOBID: $(date)"
sbatch_4b284b84b80c4pyseed2.sh ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
#SBATCH --exclude=nid007571,nid007112,nid006774,nid007502,nid007506,nid007507,nid005145,nid006692,nid007218,nid007123,nid006124,nid006123,nid007496,nid007237,nid006852,nid007206,nid006947,nid007212,nid006977,nid007222,nid005444,nid007219,nid007493,nid007221,nid005300,nid005619,nid006118,nid005203,nid006113,nid006481,nid007077,nid005208,nid005207,nid005879,nid005901
#SBATCH --nodes=32
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=40
#SBATCH --mem=256G
#SBATCH -p standard-g
#SBATCH -t 48:00:00
#SBATCH --gpus-per-node=mi250:8
#SBATCH --exclusive=user
#SBATCH --hint=nomultithread
#SBATCH --account=project_462000119
#SBATCH -o logs/%j.out
#SBATCH -e logs/%j.err

# Pretrains a 4.5B-parameter GPT model on an 80% C4 / 20% Python mixture
# (84B tokens) with seed 2, via Megatron-DeepSpeed on 32 LUMI mi250 nodes.

VARIANT=4b284b84b80c4pyseed2

# If run without sbatch, submit self and exit.
# Quote with a :- default: SLURM_JOB_ID is unset in this branch and the
# unquoted test only worked by accident ([ -z ] is true with no operand).
if [ -z "${SLURM_JOB_ID:-}" ]; then
    mkdir -p logs
    sbatch "$0"
    exit
fi

set -euo pipefail

# symlink logs/latest.out and logs/latest.err to this job's logs
ln -f -s "$SLURM_JOB_ID.out" logs/latest.out
ln -f -s "$SLURM_JOB_ID.err" logs/latest.err

# Per-variant paths: touching the kill-switch file stops the run gracefully.
KILL_SWITCH_PATH=kill-switch-$VARIANT
CHECKPOINT_PATH=checkpoints_$VARIANT
TENSORBOARD_PATH=tensorboard_$VARIANT

# Data
VOCAB_FILE="gpt2/vocab.json"
MERGE_FILE="gpt2/merges.txt"

TRAIN_DATA_PATH=train84b80c4py.txt
# "train: 0.8 0:1 /scratch/project_462000119/data/c4_subsampled/gpt2tok_c4_en_84B_text_document, 0.2 0:1 /scratch/project_462000119/data/python/gpt2tok_python_84B_content_document"
VALID_DATA_PATH=valc4py.txt
# "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document" "validation_python: 1.0 0.95:1 /scratch/project_462000119/data/python/gpt2tok_python_content_document"

# Parallelism: tensor-parallel 2, no pipeline parallelism.
PP_SIZE=1
TP_SIZE=2

MICRO_BATCH_SIZE=2
GRADIENT_ACCUMULATION_STEPS=1
WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))

# Model parameters (PARAM_4516M = 4.5B config declared in model_params.sh)
source model_params.sh
MODEL_PARAM=("${PARAM_4516M[@]}")
NHIDDEN=${MODEL_PARAM[0]}
FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
KV_SIZE=${MODEL_PARAM[2]}
NHEADS=${MODEL_PARAM[3]}
NLAYERS=${MODEL_PARAM[4]}
SEQ_LEN=2048

echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"

SAVE_INTERVAL=10000

# Tokens: 84_000_000_000
# -> Samples: 41_015_625.0  (tokens / SEQ_LEN; underscores are accepted by
# the Python argparse int parser on the Megatron side)
TRAIN_SAMPLES=41_015_625

OPTIMIZER_ARGS=" \
    --optimizer adam \
    --adam-beta1 0.9 \
    --adam-beta2 0.95 \
    --adam-eps 1e-8 \
    --lr 2e-4 \
    --min-lr 2e-5 \
    --lr-decay-style cosine \
    --lr-decay-samples $TRAIN_SAMPLES \
    --lr-warmup-samples 410_156 \
    --clip-grad 1.0 \
    --weight-decay 1e-1 \
    "

# NOTE(review): --clip-grad 1.0 is repeated below in GPT_ARGS; harmless
# (same value) but redundant.
GPT_ARGS=" \
    --num-layers $NLAYERS \
    --hidden-size $NHIDDEN \
    --num-attention-heads $NHEADS \
    --kv-channels $KV_SIZE \
    --ffn-hidden-size $FFN_HIDDEN_SIZE \
    --seq-length $SEQ_LEN \
    --max-position-embeddings $SEQ_LEN \
    --micro-batch-size $MICRO_BATCH_SIZE \
    --global-batch-size $GLOBAL_BATCH_SIZE \
    --train-samples $TRAIN_SAMPLES \
    --vocab-file $VOCAB_FILE \
    --merge-file $MERGE_FILE \
    --clip-grad 1.0 \
    --kill-switch-path $KILL_SWITCH_PATH \
    --bf16 \
    $OPTIMIZER_ARGS \
    "

OUTPUT_ARGS=" \
    --log-interval 10 \
    --save-interval $SAVE_INTERVAL \
    --eval-interval 5000 \
    --eval-iters 10 \
    --tensorboard-dir $TENSORBOARD_PATH \
    --tensorboard-queue-size 5 \
    --log-timers-to-tensorboard \
    --log-batch-size-to-tensorboard \
    --log-validation-ppl-to-tensorboard \
    "

ZERO_STAGE=0

mkdir -p ds_configs
DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"

cat <<EOF > "$DS_CONFIG_PATH"
{
    "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
    "train_batch_size": $GLOBAL_BATCH_SIZE,
    "gradient_clipping": 1.0,
    "zero_optimization": {
        "stage": $ZERO_STAGE
    },
    "bf16": {
        "enabled": true
    },
    "steps_per_print": 2000,
    "wall_clock_breakdown": false
}
EOF

DEEPSPEED_ARGS=" \
    --deepspeed \
    --deepspeed_config $DS_CONFIG_PATH \
    --zero-stage $ZERO_STAGE \
    "

CMD=" \
    Megatron-DeepSpeed/pretrain_gpt.py \
    --tensor-model-parallel-size $TP_SIZE \
    --pipeline-model-parallel-size $PP_SIZE \
    $GPT_ARGS \
    $OUTPUT_ARGS \
    --save $CHECKPOINT_PATH \
    --load $CHECKPOINT_PATH \
    --train-weighted-split-paths-path $TRAIN_DATA_PATH \
    --valid-weighted-split-paths-path $VALID_DATA_PATH \
    --data-impl mmap \
    $DEEPSPEED_ARGS \
    --seed 2 \
    "

echo "$CMD"

echo "START $SLURM_JOBID: $(date)"

# bash launch_srun.sh $CMD
# shellcheck disable=SC2086 -- $CMD must word-split into srun arguments
srun --label launch.sh $CMD

echo "END $SLURM_JOBID: $(date)"
sbatch_4b284b84b80c4pyseed3.sh ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
#SBATCH --exclude=nid007571,nid007112,nid006774,nid007502,nid007506,nid007507,nid005145,nid006692,nid007218,nid007123,nid006124,nid006123,nid007496,nid007237,nid006852,nid007206,nid006947,nid007212,nid006977,nid007222,nid005444,nid007219,nid007493,nid007221,nid005300,nid005619,nid006118,nid005203,nid006113,nid006481,nid007077,nid005208,nid005207,nid005879,nid005901
#SBATCH --nodes=32
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=40
#SBATCH --mem=256G
#SBATCH -p standard-g
#SBATCH -t 48:00:00
#SBATCH --gpus-per-node=mi250:8
#SBATCH --exclusive=user
#SBATCH --hint=nomultithread
#SBATCH --account=project_462000119
#SBATCH -o logs/%j.out
#SBATCH -e logs/%j.err

# Pretrains a 4.5B-parameter GPT model on an 80% C4 / 20% Python mixture
# (84B tokens) with seed 3, via Megatron-DeepSpeed on 32 LUMI mi250 nodes.

VARIANT=4b284b84b80c4pyseed3

# If run without sbatch, submit self and exit.
# Quote with a :- default: SLURM_JOB_ID is unset in this branch and the
# unquoted test only worked by accident ([ -z ] is true with no operand).
if [ -z "${SLURM_JOB_ID:-}" ]; then
    mkdir -p logs
    sbatch "$0"
    exit
fi

set -euo pipefail

# symlink logs/latest.out and logs/latest.err to this job's logs
ln -f -s "$SLURM_JOB_ID.out" logs/latest.out
ln -f -s "$SLURM_JOB_ID.err" logs/latest.err

# Per-variant paths: touching the kill-switch file stops the run gracefully.
KILL_SWITCH_PATH=kill-switch-$VARIANT
CHECKPOINT_PATH=checkpoints_$VARIANT
TENSORBOARD_PATH=tensorboard_$VARIANT

# Data
VOCAB_FILE="gpt2/vocab.json"
MERGE_FILE="gpt2/merges.txt"

TRAIN_DATA_PATH=train84b80c4py.txt
# "train: 0.8 0:1 /scratch/project_462000119/data/c4_subsampled/gpt2tok_c4_en_84B_text_document, 0.2 0:1 /scratch/project_462000119/data/python/gpt2tok_python_84B_content_document"
VALID_DATA_PATH=valc4py.txt
# "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document" "validation_python: 1.0 0.95:1 /scratch/project_462000119/data/python/gpt2tok_python_content_document"

# Parallelism: tensor-parallel 2, no pipeline parallelism.
PP_SIZE=1
TP_SIZE=2

MICRO_BATCH_SIZE=2
GRADIENT_ACCUMULATION_STEPS=1
WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))

# Model parameters (PARAM_4516M = 4.5B config declared in model_params.sh)
source model_params.sh
MODEL_PARAM=("${PARAM_4516M[@]}")
NHIDDEN=${MODEL_PARAM[0]}
FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
KV_SIZE=${MODEL_PARAM[2]}
NHEADS=${MODEL_PARAM[3]}
NLAYERS=${MODEL_PARAM[4]}
SEQ_LEN=2048

echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"

SAVE_INTERVAL=10000

# Tokens: 84_000_000_000
# -> Samples: 41_015_625.0  (tokens / SEQ_LEN; underscores are accepted by
# the Python argparse int parser on the Megatron side)
TRAIN_SAMPLES=41_015_625

OPTIMIZER_ARGS=" \
    --optimizer adam \
    --adam-beta1 0.9 \
    --adam-beta2 0.95 \
    --adam-eps 1e-8 \
    --lr 2e-4 \
    --min-lr 2e-5 \
    --lr-decay-style cosine \
    --lr-decay-samples $TRAIN_SAMPLES \
    --lr-warmup-samples 410_156 \
    --clip-grad 1.0 \
    --weight-decay 1e-1 \
    "

# NOTE(review): --clip-grad 1.0 is repeated below in GPT_ARGS; harmless
# (same value) but redundant.
GPT_ARGS=" \
    --num-layers $NLAYERS \
    --hidden-size $NHIDDEN \
    --num-attention-heads $NHEADS \
    --kv-channels $KV_SIZE \
    --ffn-hidden-size $FFN_HIDDEN_SIZE \
    --seq-length $SEQ_LEN \
    --max-position-embeddings $SEQ_LEN \
    --micro-batch-size $MICRO_BATCH_SIZE \
    --global-batch-size $GLOBAL_BATCH_SIZE \
    --train-samples $TRAIN_SAMPLES \
    --vocab-file $VOCAB_FILE \
    --merge-file $MERGE_FILE \
    --clip-grad 1.0 \
    --kill-switch-path $KILL_SWITCH_PATH \
    --bf16 \
    $OPTIMIZER_ARGS \
    "

OUTPUT_ARGS=" \
    --log-interval 10 \
    --save-interval $SAVE_INTERVAL \
    --eval-interval 5000 \
    --eval-iters 10 \
    --tensorboard-dir $TENSORBOARD_PATH \
    --tensorboard-queue-size 5 \
    --log-timers-to-tensorboard \
    --log-batch-size-to-tensorboard \
    --log-validation-ppl-to-tensorboard \
    "

ZERO_STAGE=0

mkdir -p ds_configs
DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"

cat <<EOF > "$DS_CONFIG_PATH"
{
    "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
    "train_batch_size": $GLOBAL_BATCH_SIZE,
    "gradient_clipping": 1.0,
    "zero_optimization": {
        "stage": $ZERO_STAGE
    },
    "bf16": {
        "enabled": true
    },
    "steps_per_print": 2000,
    "wall_clock_breakdown": false
}
EOF

DEEPSPEED_ARGS=" \
    --deepspeed \
    --deepspeed_config $DS_CONFIG_PATH \
    --zero-stage $ZERO_STAGE \
    "

CMD=" \
    Megatron-DeepSpeed/pretrain_gpt.py \
    --tensor-model-parallel-size $TP_SIZE \
    --pipeline-model-parallel-size $PP_SIZE \
    $GPT_ARGS \
    $OUTPUT_ARGS \
    --save $CHECKPOINT_PATH \
    --load $CHECKPOINT_PATH \
    --train-weighted-split-paths-path $TRAIN_DATA_PATH \
    --valid-weighted-split-paths-path $VALID_DATA_PATH \
    --data-impl mmap \
    $DEEPSPEED_ARGS \
    --seed 3 \
    "

echo "$CMD"

echo "START $SLURM_JOBID: $(date)"

# bash launch_srun.sh $CMD
# shellcheck disable=SC2086 -- $CMD must word-split into srun arguments
srun --label launch.sh $CMD

echo "END $SLURM_JOBID: $(date)"
sbatch_4b284b84b80c4pyseed4.sh ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
#SBATCH --exclude=nid007571,nid007112,nid006774,nid007502,nid007506,nid007507,nid005145,nid006692,nid007218,nid007123,nid006124,nid006123,nid007496,nid007237,nid006852,nid007206,nid006947,nid007212,nid006977,nid007222,nid005444,nid007219,nid007493,nid007221,nid005300,nid005619,nid006118,nid005203,nid006113,nid006481,nid007077,nid005208,nid005207,nid005879,nid005901
#SBATCH --nodes=32
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=40
#SBATCH --mem=256G
#SBATCH -p standard-g
#SBATCH -t 48:00:00
#SBATCH --gpus-per-node=mi250:8
#SBATCH --exclusive=user
#SBATCH --hint=nomultithread
#SBATCH --account=project_462000119
#SBATCH -o logs/%j.out
#SBATCH -e logs/%j.err

# Pretrains a 4.5B-parameter GPT model on an 80% C4 / 20% Python mixture
# (84B tokens) with seed 4, via Megatron-DeepSpeed on 32 LUMI mi250 nodes.

VARIANT=4b284b84b80c4pyseed4

# If run without sbatch, submit self and exit.
# Quote with a :- default: SLURM_JOB_ID is unset in this branch and the
# unquoted test only worked by accident ([ -z ] is true with no operand).
if [ -z "${SLURM_JOB_ID:-}" ]; then
    mkdir -p logs
    sbatch "$0"
    exit
fi

set -euo pipefail

# symlink logs/latest.out and logs/latest.err to this job's logs
ln -f -s "$SLURM_JOB_ID.out" logs/latest.out
ln -f -s "$SLURM_JOB_ID.err" logs/latest.err

# Per-variant paths: touching the kill-switch file stops the run gracefully.
KILL_SWITCH_PATH=kill-switch-$VARIANT
CHECKPOINT_PATH=checkpoints_$VARIANT
TENSORBOARD_PATH=tensorboard_$VARIANT

# Data
VOCAB_FILE="gpt2/vocab.json"
MERGE_FILE="gpt2/merges.txt"

TRAIN_DATA_PATH=train84b80c4py.txt
# "train: 0.8 0:1 /scratch/project_462000119/data/c4_subsampled/gpt2tok_c4_en_84B_text_document, 0.2 0:1 /scratch/project_462000119/data/python/gpt2tok_python_84B_content_document"
VALID_DATA_PATH=valc4py.txt
# "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document" "validation_python: 1.0 0.95:1 /scratch/project_462000119/data/python/gpt2tok_python_content_document"

# Parallelism: tensor-parallel 2, no pipeline parallelism.
PP_SIZE=1
TP_SIZE=2

MICRO_BATCH_SIZE=2
GRADIENT_ACCUMULATION_STEPS=1
WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))

# Model parameters (PARAM_4516M = 4.5B config declared in model_params.sh)
source model_params.sh
MODEL_PARAM=("${PARAM_4516M[@]}")
NHIDDEN=${MODEL_PARAM[0]}
FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
KV_SIZE=${MODEL_PARAM[2]}
NHEADS=${MODEL_PARAM[3]}
NLAYERS=${MODEL_PARAM[4]}
SEQ_LEN=2048

echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"

SAVE_INTERVAL=10000

# Tokens: 84_000_000_000
# -> Samples: 41_015_625.0  (tokens / SEQ_LEN; underscores are accepted by
# the Python argparse int parser on the Megatron side)
TRAIN_SAMPLES=41_015_625

OPTIMIZER_ARGS=" \
    --optimizer adam \
    --adam-beta1 0.9 \
    --adam-beta2 0.95 \
    --adam-eps 1e-8 \
    --lr 2e-4 \
    --min-lr 2e-5 \
    --lr-decay-style cosine \
    --lr-decay-samples $TRAIN_SAMPLES \
    --lr-warmup-samples 410_156 \
    --clip-grad 1.0 \
    --weight-decay 1e-1 \
    "

# NOTE(review): --clip-grad 1.0 is repeated below in GPT_ARGS; harmless
# (same value) but redundant.
GPT_ARGS=" \
    --num-layers $NLAYERS \
    --hidden-size $NHIDDEN \
    --num-attention-heads $NHEADS \
    --kv-channels $KV_SIZE \
    --ffn-hidden-size $FFN_HIDDEN_SIZE \
    --seq-length $SEQ_LEN \
    --max-position-embeddings $SEQ_LEN \
    --micro-batch-size $MICRO_BATCH_SIZE \
    --global-batch-size $GLOBAL_BATCH_SIZE \
    --train-samples $TRAIN_SAMPLES \
    --vocab-file $VOCAB_FILE \
    --merge-file $MERGE_FILE \
    --clip-grad 1.0 \
    --kill-switch-path $KILL_SWITCH_PATH \
    --bf16 \
    $OPTIMIZER_ARGS \
    "

OUTPUT_ARGS=" \
    --log-interval 10 \
    --save-interval $SAVE_INTERVAL \
    --eval-interval 5000 \
    --eval-iters 10 \
    --tensorboard-dir $TENSORBOARD_PATH \
    --tensorboard-queue-size 5 \
    --log-timers-to-tensorboard \
    --log-batch-size-to-tensorboard \
    --log-validation-ppl-to-tensorboard \
    "

ZERO_STAGE=0

mkdir -p ds_configs
DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"

cat <<EOF > "$DS_CONFIG_PATH"
{
    "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
    "train_batch_size": $GLOBAL_BATCH_SIZE,
    "gradient_clipping": 1.0,
    "zero_optimization": {
        "stage": $ZERO_STAGE
    },
    "bf16": {
        "enabled": true
    },
    "steps_per_print": 2000,
    "wall_clock_breakdown": false
}
EOF

DEEPSPEED_ARGS=" \
    --deepspeed \
    --deepspeed_config $DS_CONFIG_PATH \
    --zero-stage $ZERO_STAGE \
    "

CMD=" \
    Megatron-DeepSpeed/pretrain_gpt.py \
    --tensor-model-parallel-size $TP_SIZE \
    --pipeline-model-parallel-size $PP_SIZE \
    $GPT_ARGS \
    $OUTPUT_ARGS \
    --save $CHECKPOINT_PATH \
    --load $CHECKPOINT_PATH \
    --train-weighted-split-paths-path $TRAIN_DATA_PATH \
    --valid-weighted-split-paths-path $VALID_DATA_PATH \
    --data-impl mmap \
    $DEEPSPEED_ARGS \
    --seed 4 \
    "

echo "$CMD"

echo "START $SLURM_JOBID: $(date)"

# bash launch_srun.sh $CMD
# shellcheck disable=SC2086 -- $CMD must word-split into srun arguments
srun --label launch.sh $CMD

echo "END $SLURM_JOBID: $(date)"
sbatch_4b284b84b90c4pyseed1.sh ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
#SBATCH --exclude=nid007571,nid007112,nid006774,nid007502,nid007506,nid007507,nid005145,nid006692,nid007218,nid007123,nid006124,nid006123,nid007496,nid007237,nid006852,nid007206,nid006947,nid007212,nid006977,nid007222,nid005444,nid007219,nid007493,nid007221,nid005300,nid005619,nid006118,nid005203,nid006113,nid006481,nid007077,nid005208,nid005207,nid005879,nid005901
#SBATCH --nodes=32
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=40
#SBATCH --mem=256G
#SBATCH -p standard-g
#SBATCH -t 48:00:00
#SBATCH --gpus-per-node=mi250:8
#SBATCH --exclusive=user
#SBATCH --hint=nomultithread
#SBATCH --account=project_462000119
#SBATCH -o logs/%j.out
#SBATCH -e logs/%j.err

# Pretrains a 4.5B-parameter GPT model on a 90% C4 / 10% Python mixture
# (84B tokens) with seed 1, via Megatron-DeepSpeed on 32 LUMI mi250 nodes.

VARIANT=4b284b84b90c4pyseed1

# If run without sbatch, submit self and exit.
# Quote with a :- default: SLURM_JOB_ID is unset in this branch and the
# unquoted test only worked by accident ([ -z ] is true with no operand).
if [ -z "${SLURM_JOB_ID:-}" ]; then
    mkdir -p logs
    sbatch "$0"
    exit
fi

set -euo pipefail

# symlink logs/latest.out and logs/latest.err to this job's logs
ln -f -s "$SLURM_JOB_ID.out" logs/latest.out
ln -f -s "$SLURM_JOB_ID.err" logs/latest.err

# Per-variant paths: touching the kill-switch file stops the run gracefully.
KILL_SWITCH_PATH=kill-switch-$VARIANT
CHECKPOINT_PATH=checkpoints_$VARIANT
TENSORBOARD_PATH=tensorboard_$VARIANT

# Data
VOCAB_FILE="gpt2/vocab.json"
MERGE_FILE="gpt2/merges.txt"

TRAIN_DATA_PATH=train84b90c4py.txt
# "train: 0.9 0:1 /scratch/project_462000119/data/c4_subsampled/gpt2tok_c4_en_84B_text_document, 0.1 0:1 /scratch/project_462000119/data/python/gpt2tok_python_84B_content_document"
VALID_DATA_PATH=valc4py.txt
# "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document" "validation_python: 1.0 0.95:1 /scratch/project_462000119/data/python/gpt2tok_python_content_document"

# Parallelism: tensor-parallel 2, no pipeline parallelism.
PP_SIZE=1
TP_SIZE=2

MICRO_BATCH_SIZE=2
GRADIENT_ACCUMULATION_STEPS=1
WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))

# Model parameters (PARAM_4516M = 4.5B config declared in model_params.sh)
source model_params.sh
MODEL_PARAM=("${PARAM_4516M[@]}")
NHIDDEN=${MODEL_PARAM[0]}
FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
KV_SIZE=${MODEL_PARAM[2]}
NHEADS=${MODEL_PARAM[3]}
NLAYERS=${MODEL_PARAM[4]}
SEQ_LEN=2048

echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"

SAVE_INTERVAL=10000

# Tokens: 84_000_000_000
# -> Samples: 41_015_625.0  (tokens / SEQ_LEN; underscores are accepted by
# the Python argparse int parser on the Megatron side)
TRAIN_SAMPLES=41_015_625

OPTIMIZER_ARGS=" \
    --optimizer adam \
    --adam-beta1 0.9 \
    --adam-beta2 0.95 \
    --adam-eps 1e-8 \
    --lr 2e-4 \
    --min-lr 2e-5 \
    --lr-decay-style cosine \
    --lr-decay-samples $TRAIN_SAMPLES \
    --lr-warmup-samples 410_156 \
    --clip-grad 1.0 \
    --weight-decay 1e-1 \
    "

# NOTE(review): --clip-grad 1.0 is repeated below in GPT_ARGS; harmless
# (same value) but redundant.
GPT_ARGS=" \
    --num-layers $NLAYERS \
    --hidden-size $NHIDDEN \
    --num-attention-heads $NHEADS \
    --kv-channels $KV_SIZE \
    --ffn-hidden-size $FFN_HIDDEN_SIZE \
    --seq-length $SEQ_LEN \
    --max-position-embeddings $SEQ_LEN \
    --micro-batch-size $MICRO_BATCH_SIZE \
    --global-batch-size $GLOBAL_BATCH_SIZE \
    --train-samples $TRAIN_SAMPLES \
    --vocab-file $VOCAB_FILE \
    --merge-file $MERGE_FILE \
    --clip-grad 1.0 \
    --kill-switch-path $KILL_SWITCH_PATH \
    --bf16 \
    $OPTIMIZER_ARGS \
    "

OUTPUT_ARGS=" \
    --log-interval 10 \
    --save-interval $SAVE_INTERVAL \
    --eval-interval 5000 \
    --eval-iters 10 \
    --tensorboard-dir $TENSORBOARD_PATH \
    --tensorboard-queue-size 5 \
    --log-timers-to-tensorboard \
    --log-batch-size-to-tensorboard \
    --log-validation-ppl-to-tensorboard \
    "

ZERO_STAGE=0

mkdir -p ds_configs
DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"

cat <<EOF > "$DS_CONFIG_PATH"
{
    "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
    "train_batch_size": $GLOBAL_BATCH_SIZE,
    "gradient_clipping": 1.0,
    "zero_optimization": {
        "stage": $ZERO_STAGE
    },
    "bf16": {
        "enabled": true
    },
    "steps_per_print": 2000,
    "wall_clock_breakdown": false
}
EOF

DEEPSPEED_ARGS=" \
    --deepspeed \
    --deepspeed_config $DS_CONFIG_PATH \
    --zero-stage $ZERO_STAGE \
    "

CMD=" \
    Megatron-DeepSpeed/pretrain_gpt.py \
    --tensor-model-parallel-size $TP_SIZE \
    --pipeline-model-parallel-size $PP_SIZE \
    $GPT_ARGS \
    $OUTPUT_ARGS \
    --save $CHECKPOINT_PATH \
    --load $CHECKPOINT_PATH \
    --train-weighted-split-paths-path $TRAIN_DATA_PATH \
    --valid-weighted-split-paths-path $VALID_DATA_PATH \
    --data-impl mmap \
    $DEEPSPEED_ARGS \
    --seed 1 \
    "

echo "$CMD"

echo "START $SLURM_JOBID: $(date)"

# bash launch_srun.sh $CMD
# shellcheck disable=SC2086 -- $CMD must word-split into srun arguments
srun --label launch.sh $CMD

echo "END $SLURM_JOBID: $(date)"
sbatch_4b284b84b90c4pyseed2.sh ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
#SBATCH --exclude=nid007571,nid007112,nid006774,nid007502,nid007506,nid007507,nid005145,nid006692,nid007218,nid007123,nid006124,nid006123,nid007496,nid007237,nid006852,nid007206,nid006947,nid007212,nid006977,nid007222,nid005444,nid007219,nid007493,nid007221,nid005300,nid005619,nid006118,nid005203,nid006113,nid006481,nid007077,nid005208,nid005207,nid005879,nid005901
#SBATCH --nodes=32
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=40
#SBATCH --mem=256G
#SBATCH -p standard-g
#SBATCH -t 48:00:00
#SBATCH --gpus-per-node=mi250:8
#SBATCH --exclusive=user
#SBATCH --hint=nomultithread
#SBATCH --account=project_462000119
#SBATCH -o logs/%j.out
#SBATCH -e logs/%j.err

# Pretrains a 4.5B-parameter GPT model on a 90% C4 / 10% Python mixture
# (84B tokens) with seed 2, via Megatron-DeepSpeed on 32 LUMI mi250 nodes.

VARIANT=4b284b84b90c4pyseed2

# If run without sbatch, submit self and exit.
# Quote with a :- default: SLURM_JOB_ID is unset in this branch and the
# unquoted test only worked by accident ([ -z ] is true with no operand).
if [ -z "${SLURM_JOB_ID:-}" ]; then
    mkdir -p logs
    sbatch "$0"
    exit
fi

set -euo pipefail

# symlink logs/latest.out and logs/latest.err to this job's logs
ln -f -s "$SLURM_JOB_ID.out" logs/latest.out
ln -f -s "$SLURM_JOB_ID.err" logs/latest.err

# Per-variant paths: touching the kill-switch file stops the run gracefully.
KILL_SWITCH_PATH=kill-switch-$VARIANT
CHECKPOINT_PATH=checkpoints_$VARIANT
TENSORBOARD_PATH=tensorboard_$VARIANT

# Data
VOCAB_FILE="gpt2/vocab.json"
MERGE_FILE="gpt2/merges.txt"

TRAIN_DATA_PATH=train84b90c4py.txt
# "train: 0.9 0:1 /scratch/project_462000119/data/c4_subsampled/gpt2tok_c4_en_84B_text_document, 0.1 0:1 /scratch/project_462000119/data/python/gpt2tok_python_84B_content_document"
VALID_DATA_PATH=valc4py.txt
# "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document" "validation_python: 1.0 0.95:1 /scratch/project_462000119/data/python/gpt2tok_python_content_document"

# Parallelism: tensor-parallel 2, no pipeline parallelism.
PP_SIZE=1
TP_SIZE=2

MICRO_BATCH_SIZE=2
GRADIENT_ACCUMULATION_STEPS=1
WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))

# Model parameters (PARAM_4516M = 4.5B config declared in model_params.sh)
source model_params.sh
MODEL_PARAM=("${PARAM_4516M[@]}")
NHIDDEN=${MODEL_PARAM[0]}
FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
KV_SIZE=${MODEL_PARAM[2]}
NHEADS=${MODEL_PARAM[3]}
NLAYERS=${MODEL_PARAM[4]}
SEQ_LEN=2048

echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"

SAVE_INTERVAL=10000

# Tokens: 84_000_000_000
# -> Samples: 41_015_625.0  (tokens / SEQ_LEN; underscores are accepted by
# the Python argparse int parser on the Megatron side)
TRAIN_SAMPLES=41_015_625

OPTIMIZER_ARGS=" \
    --optimizer adam \
    --adam-beta1 0.9 \
    --adam-beta2 0.95 \
    --adam-eps 1e-8 \
    --lr 2e-4 \
    --min-lr 2e-5 \
    --lr-decay-style cosine \
    --lr-decay-samples $TRAIN_SAMPLES \
    --lr-warmup-samples 410_156 \
    --clip-grad 1.0 \
    --weight-decay 1e-1 \
    "

# NOTE(review): --clip-grad 1.0 is repeated below in GPT_ARGS; harmless
# (same value) but redundant.
GPT_ARGS=" \
    --num-layers $NLAYERS \
    --hidden-size $NHIDDEN \
    --num-attention-heads $NHEADS \
    --kv-channels $KV_SIZE \
    --ffn-hidden-size $FFN_HIDDEN_SIZE \
    --seq-length $SEQ_LEN \
    --max-position-embeddings $SEQ_LEN \
    --micro-batch-size $MICRO_BATCH_SIZE \
    --global-batch-size $GLOBAL_BATCH_SIZE \
    --train-samples $TRAIN_SAMPLES \
    --vocab-file $VOCAB_FILE \
    --merge-file $MERGE_FILE \
    --clip-grad 1.0 \
    --kill-switch-path $KILL_SWITCH_PATH \
    --bf16 \
    $OPTIMIZER_ARGS \
    "

OUTPUT_ARGS=" \
    --log-interval 10 \
    --save-interval $SAVE_INTERVAL \
    --eval-interval 5000 \
    --eval-iters 10 \
    --tensorboard-dir $TENSORBOARD_PATH \
    --tensorboard-queue-size 5 \
    --log-timers-to-tensorboard \
    --log-batch-size-to-tensorboard \
    --log-validation-ppl-to-tensorboard \
    "

ZERO_STAGE=0

mkdir -p ds_configs
DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"

cat <<EOF > "$DS_CONFIG_PATH"
{
    "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
    "train_batch_size": $GLOBAL_BATCH_SIZE,
    "gradient_clipping": 1.0,
    "zero_optimization": {
        "stage": $ZERO_STAGE
    },
    "bf16": {
        "enabled": true
    },
    "steps_per_print": 2000,
    "wall_clock_breakdown": false
}
EOF

DEEPSPEED_ARGS=" \
    --deepspeed \
    --deepspeed_config $DS_CONFIG_PATH \
    --zero-stage $ZERO_STAGE \
    "

CMD=" \
    Megatron-DeepSpeed/pretrain_gpt.py \
    --tensor-model-parallel-size $TP_SIZE \
    --pipeline-model-parallel-size $PP_SIZE \
    $GPT_ARGS \
    $OUTPUT_ARGS \
    --save $CHECKPOINT_PATH \
    --load $CHECKPOINT_PATH \
    --train-weighted-split-paths-path $TRAIN_DATA_PATH \
    --valid-weighted-split-paths-path $VALID_DATA_PATH \
    --data-impl mmap \
    $DEEPSPEED_ARGS \
    --seed 2 \
    "

echo "$CMD"

echo "START $SLURM_JOBID: $(date)"

# bash launch_srun.sh $CMD
# shellcheck disable=SC2086 -- $CMD must word-split into srun arguments
srun --label launch.sh $CMD

echo "END $SLURM_JOBID: $(date)"
sbatch_4b284b84b90c4pyseed3.sh ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
#SBATCH --exclude=nid007571,nid007112,nid006774,nid007502,nid007506,nid007507,nid005145,nid006692,nid007218,nid007123,nid006124,nid006123,nid007496,nid007237,nid006852,nid007206,nid006947,nid007212,nid006977,nid007222,nid005444,nid007219,nid007493,nid007221,nid005300,nid005619,nid006118,nid005203,nid006113,nid006481,nid007077,nid005208,nid005207,nid005879,nid005901
#SBATCH --nodes=32
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=40
#SBATCH --mem=256G
#SBATCH -p standard-g
#SBATCH -t 48:00:00
#SBATCH --gpus-per-node=mi250:8
#SBATCH --exclusive=user
#SBATCH --hint=nomultithread
#SBATCH --account=project_462000119
#SBATCH -o logs/%j.out
#SBATCH -e logs/%j.err

# Pretrains a 4.5B-parameter GPT model on a 90% C4 / 10% Python mixture
# (84B tokens) with seed 3, via Megatron-DeepSpeed on 32 LUMI mi250 nodes.

VARIANT=4b284b84b90c4pyseed3

# If run without sbatch, submit self and exit.
# Quote with a :- default: SLURM_JOB_ID is unset in this branch and the
# unquoted test only worked by accident ([ -z ] is true with no operand).
if [ -z "${SLURM_JOB_ID:-}" ]; then
    mkdir -p logs
    sbatch "$0"
    exit
fi

set -euo pipefail

# symlink logs/latest.out and logs/latest.err to this job's logs
ln -f -s "$SLURM_JOB_ID.out" logs/latest.out
ln -f -s "$SLURM_JOB_ID.err" logs/latest.err

# Per-variant paths: touching the kill-switch file stops the run gracefully.
KILL_SWITCH_PATH=kill-switch-$VARIANT
CHECKPOINT_PATH=checkpoints_$VARIANT
TENSORBOARD_PATH=tensorboard_$VARIANT

# Data
VOCAB_FILE="gpt2/vocab.json"
MERGE_FILE="gpt2/merges.txt"

TRAIN_DATA_PATH=train84b90c4py.txt
# "train: 0.9 0:1 /scratch/project_462000119/data/c4_subsampled/gpt2tok_c4_en_84B_text_document, 0.1 0:1 /scratch/project_462000119/data/python/gpt2tok_python_84B_content_document"
VALID_DATA_PATH=valc4py.txt
# "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document" "validation_python: 1.0 0.95:1 /scratch/project_462000119/data/python/gpt2tok_python_content_document"

# Parallelism: tensor-parallel 2, no pipeline parallelism.
PP_SIZE=1
TP_SIZE=2

MICRO_BATCH_SIZE=2
GRADIENT_ACCUMULATION_STEPS=1
WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))

# Model parameters (PARAM_4516M = 4.5B config declared in model_params.sh)
source model_params.sh
MODEL_PARAM=("${PARAM_4516M[@]}")
NHIDDEN=${MODEL_PARAM[0]}
FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
KV_SIZE=${MODEL_PARAM[2]}
NHEADS=${MODEL_PARAM[3]}
NLAYERS=${MODEL_PARAM[4]}
SEQ_LEN=2048

echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"

SAVE_INTERVAL=10000

# Tokens: 84_000_000_000
# -> Samples: 41_015_625.0  (tokens / SEQ_LEN; underscores are accepted by
# the Python argparse int parser on the Megatron side)
TRAIN_SAMPLES=41_015_625

OPTIMIZER_ARGS=" \
    --optimizer adam \
    --adam-beta1 0.9 \
    --adam-beta2 0.95 \
    --adam-eps 1e-8 \
    --lr 2e-4 \
    --min-lr 2e-5 \
    --lr-decay-style cosine \
    --lr-decay-samples $TRAIN_SAMPLES \
    --lr-warmup-samples 410_156 \
    --clip-grad 1.0 \
    --weight-decay 1e-1 \
    "

# NOTE(review): --clip-grad 1.0 is repeated below in GPT_ARGS; harmless
# (same value) but redundant.
GPT_ARGS=" \
    --num-layers $NLAYERS \
    --hidden-size $NHIDDEN \
    --num-attention-heads $NHEADS \
    --kv-channels $KV_SIZE \
    --ffn-hidden-size $FFN_HIDDEN_SIZE \
    --seq-length $SEQ_LEN \
    --max-position-embeddings $SEQ_LEN \
    --micro-batch-size $MICRO_BATCH_SIZE \
    --global-batch-size $GLOBAL_BATCH_SIZE \
    --train-samples $TRAIN_SAMPLES \
    --vocab-file $VOCAB_FILE \
    --merge-file $MERGE_FILE \
    --clip-grad 1.0 \
    --kill-switch-path $KILL_SWITCH_PATH \
    --bf16 \
    $OPTIMIZER_ARGS \
    "

OUTPUT_ARGS=" \
    --log-interval 10 \
    --save-interval $SAVE_INTERVAL \
    --eval-interval 5000 \
    --eval-iters 10 \
    --tensorboard-dir $TENSORBOARD_PATH \
    --tensorboard-queue-size 5 \
    --log-timers-to-tensorboard \
    --log-batch-size-to-tensorboard \
    --log-validation-ppl-to-tensorboard \
    "

ZERO_STAGE=0

mkdir -p ds_configs
DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"

cat <<EOF > "$DS_CONFIG_PATH"
{
    "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
    "train_batch_size": $GLOBAL_BATCH_SIZE,
    "gradient_clipping": 1.0,
    "zero_optimization": {
        "stage": $ZERO_STAGE
    },
    "bf16": {
        "enabled": true
    },
    "steps_per_print": 2000,
    "wall_clock_breakdown": false
}
EOF

DEEPSPEED_ARGS=" \
    --deepspeed \
    --deepspeed_config $DS_CONFIG_PATH \
    --zero-stage $ZERO_STAGE \
    "

CMD=" \
    Megatron-DeepSpeed/pretrain_gpt.py \
    --tensor-model-parallel-size $TP_SIZE \
    --pipeline-model-parallel-size $PP_SIZE \
    $GPT_ARGS \
    $OUTPUT_ARGS \
    --save $CHECKPOINT_PATH \
    --load $CHECKPOINT_PATH \
    --train-weighted-split-paths-path $TRAIN_DATA_PATH \
    --valid-weighted-split-paths-path $VALID_DATA_PATH \
    --data-impl mmap \
    $DEEPSPEED_ARGS \
    --seed 3 \
    "

echo "$CMD"

echo "START $SLURM_JOBID: $(date)"

# bash launch_srun.sh $CMD
# shellcheck disable=SC2086 -- $CMD must word-split into srun arguments
srun --label launch.sh $CMD

echo "END $SLURM_JOBID: $(date)"
sbatch_4b284b84b90c4pyseed4.sh ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ #SBATCH --exclude=nid007571,nid007112,nid006774,nid007502,nid007506,nid007507,nid005145,nid006692,nid007218,nid007123,nid006124,nid006123,nid007496,nid007237,nid006852,nid007206,nid006947,nid007212,nid006977,nid007222,nid005444,nid007219,nid007493,nid007221,nid005300,nid005619,nid006118,nid005203,nid006113,nid006481,nid007077,nid005208,nid005207,nid005879,nid005901
3
+ #SBATCH --nodes=32
4
+ #SBATCH --ntasks-per-node=1
5
+ #SBATCH --cpus-per-task=40
6
+ #SBATCH --mem=256G
7
+ #SBATCH -p standard-g
8
+ #SBATCH -t 48:00:00
9
+ #SBATCH --gpus-per-node=mi250:8
10
+ #SBATCH --exclusive=user
11
+ #SBATCH --hint=nomultithread
12
+ #SBATCH --account=project_462000119
13
+ #SBATCH -o logs/%j.out
14
+ #SBATCH -e logs/%j.err
15
+
16
+ VARIANT=4b284b84b90c4pyseed4
17
+
18
+ # if run without sbatch, invoke here
19
+ if [ -z $SLURM_JOB_ID ]; then
20
+ mkdir -p logs
21
+ sbatch "$0"
22
+ exit
23
+ fi
24
+
25
+ set -euo pipefail
26
+
27
+ # symlink logs/latest.out and logs/latest.err
28
+ ln -f -s $SLURM_JOB_ID.out logs/latest.out
29
+ ln -f -s $SLURM_JOB_ID.err logs/latest.err
30
+
31
+ KILL_SWITCH_PATH=kill-switch-$VARIANT
32
+ CHECKPOINT_PATH=checkpoints_$VARIANT
33
+ TENSORBOARD_PATH=tensorboard_$VARIANT
34
+
35
+ # Data
36
+ VOCAB_FILE="gpt2/vocab.json"
37
+ MERGE_FILE="gpt2/merges.txt"
38
+
39
+ TRAIN_DATA_PATH=train84b90c4py.txt
40
+ # "train: 0.9 0:1 /scratch/project_462000119/data/c4_subsampled/gpt2tok_c4_en_84B_text_document, 0.1 0:1 /scratch/project_462000119/data/python/gpt2tok_python_84B_content_document"
41
+ VALID_DATA_PATH=valc4py.txt
42
+ # "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document" "validation_python: 1.0 0.95:1 /scratch/project_462000119/data/python/gpt2tok_python_content_document"
43
+
44
+ PP_SIZE=1
45
+ TP_SIZE=2
46
+
47
+ MICRO_BATCH_SIZE=2
48
+ GRADIENT_ACCUMULATION_STEPS=1
49
+ WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
50
+ GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))
51
+
52
+ # Model parameters
53
+ source model_params.sh
54
+ MODEL_PARAM=("${PARAM_4516M[@]}")
55
+ NHIDDEN=${MODEL_PARAM[0]}
56
+ FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
57
+ KV_SIZE=${MODEL_PARAM[2]}
58
+ NHEADS=${MODEL_PARAM[3]}
59
+ NLAYERS=${MODEL_PARAM[4]}
60
+ SEQ_LEN=2048
61
+
62
+ echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"
63
+
64
+ SAVE_INTERVAL=10000
65
+
66
+ # Tokens: 84_000_000_000
67
+ # -> Samples: 41_015_625.0
68
+ TRAIN_SAMPLES=41_015_625
69
+
70
+ OPTIMIZER_ARGS=" \
71
+ --optimizer adam \
72
+ --adam-beta1 0.9 \
73
+ --adam-beta2 0.95 \
74
+ --adam-eps 1e-8 \
75
+ --lr 2e-4 \
76
+ --min-lr 2e-5 \
77
+ --lr-decay-style cosine \
78
+ --lr-decay-samples $TRAIN_SAMPLES \
79
+ --lr-warmup-samples 410_156 \
80
+ --clip-grad 1.0 \
81
+ --weight-decay 1e-1 \
82
+ "
83
+
84
+ GPT_ARGS=" \
85
+ --num-layers $NLAYERS \
86
+ --hidden-size $NHIDDEN \
87
+ --num-attention-heads $NHEADS \
88
+ --kv-channels $KV_SIZE \
89
+ --ffn-hidden-size $FFN_HIDDEN_SIZE \
90
+ --seq-length $SEQ_LEN \
91
+ --max-position-embeddings $SEQ_LEN \
92
+ --micro-batch-size $MICRO_BATCH_SIZE \
93
+ --global-batch-size $GLOBAL_BATCH_SIZE \
94
+ --train-samples $TRAIN_SAMPLES \
95
+ --vocab-file $VOCAB_FILE \
96
+ --merge-file $MERGE_FILE \
97
+ --clip-grad 1.0 \
98
+ --kill-switch-path $KILL_SWITCH_PATH \
99
+ --bf16 \
100
+ $OPTIMIZER_ARGS \
101
+ "
102
+
103
+ OUTPUT_ARGS=" \
104
+ --log-interval 10 \
105
+ --save-interval $SAVE_INTERVAL \
106
+ --eval-interval 5000 \
107
+ --eval-iters 10 \
108
+ --tensorboard-dir $TENSORBOARD_PATH \
109
+ --tensorboard-queue-size 5 \
110
+ --log-timers-to-tensorboard \
111
+ --log-batch-size-to-tensorboard \
112
+ --log-validation-ppl-to-tensorboard \
113
+ "
114
+
115
+ ZERO_STAGE=0
116
+
117
+ mkdir -p ds_configs
118
+ DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"
119
+
120
+ cat <<EOF > $DS_CONFIG_PATH
121
+ {
122
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
123
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
124
+ "gradient_clipping": 1.0,
125
+ "zero_optimization": {
126
+ "stage": $ZERO_STAGE
127
+ },
128
+ "bf16": {
129
+ "enabled": true
130
+ },
131
+ "steps_per_print": 2000,
132
+ "wall_clock_breakdown": false
133
+ }
134
+ EOF
135
+
136
+ DEEPSPEED_ARGS=" \
137
+ --deepspeed \
138
+ --deepspeed_config $DS_CONFIG_PATH \
139
+ --zero-stage $ZERO_STAGE \
140
+ "
141
+
142
+ CMD=" \
143
+ Megatron-DeepSpeed/pretrain_gpt.py \
144
+ --tensor-model-parallel-size $TP_SIZE \
145
+ --pipeline-model-parallel-size $PP_SIZE \
146
+ $GPT_ARGS \
147
+ $OUTPUT_ARGS \
148
+ --save $CHECKPOINT_PATH \
149
+ --load $CHECKPOINT_PATH \
150
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
151
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
152
+ --data-impl mmap \
153
+ $DEEPSPEED_ARGS \
154
+ --seed 4 \
155
+ "
156
+
157
+ echo $CMD
158
+
159
+ echo "START $SLURM_JOBID: $(date)"
160
+
161
+ # bash launch_srun.sh $CMD
162
+ srun --label launch.sh $CMD
163
+
164
+ echo "END $SLURM_JOBID: $(date)"
tensorboard/tensorboard_4b284b84b10c4pyseed1/events.out.tfevents.1683756022.nid007048.61333.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d67bc92ca2ba02857de441b9e358b7c0c23a3e7e4d20c822a50f4bb41c035023
3
+ size 19996
tensorboard/tensorboard_4b284b84b10c4pyseed1/events.out.tfevents.1683756633.nid006995.89119.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a0333176e62af9844e2b9390582b8da6291daa668b619e3105c0df66e4961820
3
+ size 113224610
tensorboard/tensorboard_4b284b84b10c4pyseed1/events.out.tfevents.1683928600.nid007131.83743.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4c5e6659617a6329ad7bdfe6575be5e875bdff89b84f293b3299f6c735079f4d
3
+ size 30411691
tensorboard/tensorboard_4b284b84b10c4pyseed1/events.out.tfevents.1683974921.nid006671.116485.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c44a801835b83dc6284b98fee71d9599e2b78bddd36d063132446d76881a74f0
3
+ size 40
tensorboard/tensorboard_4b284b84b10c4pyseed1/events.out.tfevents.1683978316.nid006500.80744.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b5aad21bc9610e6b6c11f76b3b02aa534d7d3a6b8787b9f3ba60a32cb5fd98db
3
+ size 216295
tensorboard/tensorboard_4b284b84b10c4pyseed2/events.out.tfevents.1683756022.nid005878.51908.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a62915fb10088dff87555a23741c81f59215fb2f8c6e29c529d3858a7f393696
3
+ size 113531666
tensorboard/tensorboard_4b284b84b10c4pyseed2/events.out.tfevents.1683928065.nid006518.16895.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0736d9395b5347605e7b441f2eae463bc34465241b388aa5fe225eb35ed502f1
3
+ size 30104633
tensorboard/tensorboard_4b284b84b10c4pyseed2/events.out.tfevents.1683973531.nid007019.108443.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4f873da0be24a74805be74b1468d4068c6196cedcf95532dd401243b8e463e4c
3
+ size 22933
tensorboard/tensorboard_4b284b84b10c4pyseed2/events.out.tfevents.1683978316.nid006848.10586.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:39618b3ba29c6ecf0c6a27d79d2a9a07f1afbc2010f478accc507e7e4bdad8f4
3
+ size 216295
tensorboard/tensorboard_4b284b84b10c4pyseed3/events.out.tfevents.1683756022.nid006518.29112.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f479f2724d0af83fbe9116768d933689c9102159cf721f25daed543a3a8c339e
3
+ size 113815604
tensorboard/tensorboard_4b284b84b10c4pyseed3/events.out.tfevents.1683928065.nid005878.6854.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bf920e80db0cc8fbdd8691551a242c53a2a4e9aaddd8a7cc5dea7e663d7af02d
3
+ size 29820697
tensorboard/tensorboard_4b284b84b10c4pyseed3/events.out.tfevents.1683974293.nid006598.20858.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:75e21748690b8a869904b54a47db62873fe4b586067ed450219a6b926f5b511d
3
+ size 22933
tensorboard/tensorboard_4b284b84b10c4pyseed3/events.out.tfevents.1683978316.nid007019.25418.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6e0b8256e9a31b5290f0eb83c30f3aa5fb604a96a61631d1b13667d5d2edbb1e
3
+ size 216295
tensorboard/tensorboard_4b284b84b10c4pyseed4/events.out.tfevents.1683756022.nid006586.32254.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7a32662873dc5f8d16b35c82bd2970c50aef0dc39a6a1b110a0b03c3dda84dca
3
+ size 113637713