Shaomu committed
Commit bed0ae5 · 1 Parent(s): 616e676
toolkit/build_fairseq_sharded_dataset.sh ADDED
@@ -0,0 +1,155 @@
+ #!/bin/bash
+
+ #SBATCH --ntasks=1
+ #SBATCH --cpus-per-task=40
+ #SBATCH --job-name=process
+ #SBATCH --nodelist=ilps-cn002
+ #SBATCH --time=2-00:00:00
+ #SBATCH --mem=256G
+
+ pwd
+ conda info --envs
+ source /home/stan1/anaconda3/bin/activate fairseq
+ cd /ivi/ilps/projects/ltl-mt/EC40-dataset
+
+ mkdir -p dataset/spm_sharded
+
+
+ ######################## ------------ IMPORTANT ------------ ########################
+
+ ######## This is an example of how to build a sharded dataset (5 shards).
+ ######## Before running the following code, you should already have trained your sentencepiece/subword-nmt tokenizer.
+ ######## Then encode the dataset with spm, and use the following code to split it into 5 shards.
+
+ #### For the eval set, the easiest way is to add the whole eval set to all 5 shard fairseq data folders.
+ ### note: ha and kab are two exceptions (because of their data sizes): you will find them under *SPECIAL*
+
+ ######################## ------------ IMPORTANT ------------ ########################
+
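For reference, a minimal, hypothetical sketch of that prerequisite step using sentencepiece's command-line tools, shown for a single pair (en-de). The raw input paths, the joint training corpus, and the BPE model type are assumptions for illustration; only the 64k vocabulary naming and the dataset/spm/train.en-$LANG.* layout are implied by the script itself.

# Hypothetical prerequisite (not part of this commit): train a joint 64k spm model
# and encode one language pair into the dataset/spm/ layout the script expects.
spm_train --input=raw/spm_training_corpus.txt \
          --model_prefix=dataset/spm/spm_64k \
          --vocab_size=64000 \
          --character_coverage=1.0 \
          --model_type=bpe   # model type is an assumption; unigram would also work
spm_encode --model=dataset/spm/spm_64k.model --output_format=piece \
           < raw/train.en-de.en > dataset/spm/train.en-de.en
spm_encode --model=dataset/spm/spm_64k.model --output_format=piece \
           < raw/train.en-de.de > dataset/spm/train.en-de.de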
+ SHARD_SUB_DIR=('0' '1' '2' '3' '4')
+ for i in "${!SHARD_SUB_DIR[@]}"; do
+     SUB_NUMBER=${SHARD_SUB_DIR[i]}
+     mkdir dataset/spm_sharded/shard${SUB_NUMBER}
+ done
+
+ HIGH=('de' 'nl' 'fr' 'es' 'ru' 'cs' 'hi' 'bn' 'ar' 'he')
+ MED=('sv' 'da' 'it' 'pt' 'pl' 'bg' 'kn' 'mr' 'mt') #ha
+ LOW=('af' 'lb' 'ro' 'oc' 'uk' 'sr' 'sd' 'gu' 'ti' 'am')
+ ELOW=('no' 'is' 'ast' 'ca' 'be' 'bs' 'ne' 'ur' 'so') #kab
+
+ SPM_DIR=dataset/spm
+ SPM_SHARD_DIR=dataset/spm_sharded
+
+ ##
+
+ ## HIGH: 5M lines per file -> split into 1M lines per shard
+ for i in "${!HIGH[@]}"; do
+     LANG=${HIGH[i]}
+     split -l 1000000 $SPM_DIR/train.en-$LANG.en -d -a 2 $SPM_SHARD_DIR/train.en-$LANG.en.shard
+     split -l 1000000 $SPM_DIR/train.en-$LANG.$LANG -d -a 2 $SPM_SHARD_DIR/train.en-$LANG.$LANG.shard
+
+     for j in "${!SHARD_SUB_DIR[@]}"; do
+         SUB_NUMBER=${SHARD_SUB_DIR[j]}
+         mv $SPM_SHARD_DIR/train.en-$LANG.en.shard0${SUB_NUMBER} dataset/spm_sharded/shard${SUB_NUMBER}/train.en-$LANG.en
+         mv $SPM_SHARD_DIR/train.en-$LANG.$LANG.shard0${SUB_NUMBER} dataset/spm_sharded/shard${SUB_NUMBER}/train.en-$LANG.$LANG
+     done
+ done
+
+ # MED: 1M lines per file -> split into 200K lines per shard
+ for i in "${!MED[@]}"; do
+     LANG=${MED[i]}
+     split -l 200000 $SPM_DIR/train.en-$LANG.en -d -a 2 $SPM_SHARD_DIR/train.en-$LANG.en.shard
+     split -l 200000 $SPM_DIR/train.en-$LANG.$LANG -d -a 2 $SPM_SHARD_DIR/train.en-$LANG.$LANG.shard
+
+     for j in "${!SHARD_SUB_DIR[@]}"; do
+         SUB_NUMBER=${SHARD_SUB_DIR[j]}
+         mv $SPM_SHARD_DIR/train.en-$LANG.en.shard0${SUB_NUMBER} dataset/spm_sharded/shard${SUB_NUMBER}/train.en-$LANG.en
+         mv $SPM_SHARD_DIR/train.en-$LANG.$LANG.shard0${SUB_NUMBER} dataset/spm_sharded/shard${SUB_NUMBER}/train.en-$LANG.$LANG
+     done
+ done
+
+ # LOW: 100K lines per file -> split into 20K lines per shard
+ for i in "${!LOW[@]}"; do
+     LANG=${LOW[i]}
+     split -l 20000 $SPM_DIR/train.en-$LANG.en -d -a 2 $SPM_SHARD_DIR/train.en-$LANG.en.shard
+     split -l 20000 $SPM_DIR/train.en-$LANG.$LANG -d -a 2 $SPM_SHARD_DIR/train.en-$LANG.$LANG.shard
+
+     for j in "${!SHARD_SUB_DIR[@]}"; do
+         SUB_NUMBER=${SHARD_SUB_DIR[j]}
+         mv $SPM_SHARD_DIR/train.en-$LANG.en.shard0${SUB_NUMBER} dataset/spm_sharded/shard${SUB_NUMBER}/train.en-$LANG.en
+         mv $SPM_SHARD_DIR/train.en-$LANG.$LANG.shard0${SUB_NUMBER} dataset/spm_sharded/shard${SUB_NUMBER}/train.en-$LANG.$LANG
+     done
+ done
+
+ ## ELOW: 50K lines per file -> split into 10K lines per shard
+ for i in "${!ELOW[@]}"; do
+     LANG=${ELOW[i]}
+     split -l 10000 $SPM_DIR/train.en-$LANG.en -d -a 2 $SPM_SHARD_DIR/train.en-$LANG.en.shard
+     split -l 10000 $SPM_DIR/train.en-$LANG.$LANG -d -a 2 $SPM_SHARD_DIR/train.en-$LANG.$LANG.shard
+
+     for j in "${!SHARD_SUB_DIR[@]}"; do
+         SUB_NUMBER=${SHARD_SUB_DIR[j]}
+         mv $SPM_SHARD_DIR/train.en-$LANG.en.shard0${SUB_NUMBER} dataset/spm_sharded/shard${SUB_NUMBER}/train.en-$LANG.en
+         mv $SPM_SHARD_DIR/train.en-$LANG.$LANG.shard0${SUB_NUMBER} dataset/spm_sharded/shard${SUB_NUMBER}/train.en-$LANG.$LANG
+     done
+ done
+
+ # SPECIAL HA: 344000 lines -> split into 68800 lines per shard
+ HA=('ha')
+ for i in "${!HA[@]}"; do
+     LANG=${HA[i]}
+     split -l 68800 $SPM_DIR/train.en-$LANG.en -d -a 2 $SPM_SHARD_DIR/train.en-$LANG.en.shard
+     split -l 68800 $SPM_DIR/train.en-$LANG.$LANG -d -a 2 $SPM_SHARD_DIR/train.en-$LANG.$LANG.shard
+
+     for j in "${!SHARD_SUB_DIR[@]}"; do
+         SUB_NUMBER=${SHARD_SUB_DIR[j]}
+         mv $SPM_SHARD_DIR/train.en-$LANG.en.shard0${SUB_NUMBER} dataset/spm_sharded/shard${SUB_NUMBER}/train.en-$LANG.en
+         mv $SPM_SHARD_DIR/train.en-$LANG.$LANG.shard0${SUB_NUMBER} dataset/spm_sharded/shard${SUB_NUMBER}/train.en-$LANG.$LANG
+     done
+ done
+
+ # SPECIAL KAB: 18448 lines -> split into 3690 lines per shard
+ KAB=('kab')
+ for i in "${!KAB[@]}"; do
+     LANG=${KAB[i]}
+     split -l 3690 $SPM_DIR/train.en-$LANG.en -d -a 2 $SPM_SHARD_DIR/train.en-$LANG.en.shard
+     split -l 3690 $SPM_DIR/train.en-$LANG.$LANG -d -a 2 $SPM_SHARD_DIR/train.en-$LANG.$LANG.shard
+
+     for j in "${!SHARD_SUB_DIR[@]}"; do
+         SUB_NUMBER=${SHARD_SUB_DIR[j]}
+         mv $SPM_SHARD_DIR/train.en-$LANG.en.shard0${SUB_NUMBER} dataset/spm_sharded/shard${SUB_NUMBER}/train.en-$LANG.en
+         mv $SPM_SHARD_DIR/train.en-$LANG.$LANG.shard0${SUB_NUMBER} dataset/spm_sharded/shard${SUB_NUMBER}/train.en-$LANG.$LANG
+     done
+ done
+
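Since split is invoked with -d -a 2, its output files carry two-digit numeric suffixes 00 through 04, which is what the shard0${SUB_NUMBER} pattern in the mv commands relies on. An optional sanity check (a sketch, not part of the commit; it assumes the directory layout built above) to confirm that the source and target sides of every pair stay line-aligned within each shard:

# Hypothetical check: per shard, the .en side and the target side must have equal line counts.
for SHARD in 0 1 2 3 4; do
    for SRC_FILE in dataset/spm_sharded/shard${SHARD}/train.en-*.en; do
        PAIR=$(basename "$SRC_FILE" .en)   # e.g. train.en-de
        TGT=${PAIR##*-}                    # e.g. de
        SRC_LINES=$(wc -l < "$SRC_FILE")
        TGT_LINES=$(wc -l < "dataset/spm_sharded/shard${SHARD}/${PAIR}.${TGT}")
        [ "$SRC_LINES" -eq "$TGT_LINES" ] || echo "line-count mismatch in shard${SHARD}: ${PAIR}"
    done
done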
+ # ------------------------ 4. Fairseq preparation (sharded) ------------------------ #
+ SPM_DATA_DIR=dataset/spm_sharded
+ FAIRSEQ_DIR=dataset/fairseq-data-bin-sharded
+ mkdir ${FAIRSEQ_DIR}
+
+ cut -f1 dataset/spm/spm_64k.vocab | tail -n +4 | sed "s/$/ 100/g" > ${FAIRSEQ_DIR}/dict.txt
+
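The pipeline above converts the sentencepiece vocabulary into a fairseq dictionary: cut -f1 keeps only the piece column, tail -n +4 drops the first three rows (the spm control symbols <unk>, <s>, </s>, which fairseq adds on its own), and sed appends a dummy count of 100, since fairseq's dict format expects one "token count" pair per line. An illustrative before/after (the pieces shown are made up):

# spm_64k.vocab (piece<TAB>score)        ->   dict.txt (piece count)
#   <unk>   0
#   <s>     0
#   </s>    0
#   ▁the    -3.01                             ▁the 100
#   ▁of     -3.42                             ▁of 100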
+ SHARD_SUB_DIR=('0' '1' '2' '3' '4')
+ for i in "${!SHARD_SUB_DIR[@]}"; do
+     SUB_NUMBER=${SHARD_SUB_DIR[i]}
+     mkdir $FAIRSEQ_DIR/shard${SUB_NUMBER}
+ done
+
+ # preprocess with mmap dataset
+ for SHARD in $(seq 0 4); do
+     SRC=en
+     for TGT in bg so ca da be bs mt es uk am hi ro no ti de cs lb pt nl mr is ne ur oc ast ha sv kab gu ar fr ru it pl sr sd he af kn bn; do
+         fairseq-preprocess \
+             --dataset-impl mmap \
+             --source-lang ${SRC} \
+             --target-lang ${TGT} \
+             --trainpref ${SPM_DATA_DIR}/shard${SHARD}/train.${SRC}-${TGT} \
+             --destdir ${FAIRSEQ_DIR}/shard${SHARD} \
+             --thresholdtgt 0 \
+             --thresholdsrc 0 \
+             --workers 40 \
+             --srcdict ${FAIRSEQ_DIR}/dict.txt \
+             --tgtdict ${FAIRSEQ_DIR}/dict.txt
+         cp ${FAIRSEQ_DIR}/dict.txt ${FAIRSEQ_DIR}/shard${SHARD}/dict.txt
+     done
+ done
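If the assumptions above hold, each shard directory ends up with the binarized mmap data for every en-XX pair plus the shared dictionary. For a single pair such as en-de in shard0, fairseq-preprocess typically writes files along these lines (listing is illustrative):

# dataset/fairseq-data-bin-sharded/shard0/
#   dict.txt                                  # shared 64k dictionary, copied in by the script
#   train.en-de.en.bin  train.en-de.en.idx    # binarized source side
#   train.en-de.de.bin  train.en-de.de.idx    # binarized target side
#   ...                                       # same four files for every other en-XX pair

Note that no --validpref/--testpref is passed here; as the comment at the top of the script says, the eval set is expected to be added to every shard separately.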
toolkit/train-EC40-mTrans-large.sh ADDED
@@ -0,0 +1,40 @@
+ #!/bin/bash
+
+ #SBATCH --partition=gpu
+ #SBATCH --gres=gpu:4
+ #SBATCH --ntasks=4
+ #SBATCH --cpus-per-task=11
+ #SBATCH --job-name=m2m_base_shard
+ #SBATCH --nodelist=ilps-cn116
+ #SBATCH --time=20-00:00:00
+ #SBATCH --mem=250G
+ #SBATCH -o /ivi/ilps/personal/...m2m_baseline_monitor_new_large.o
+ #SBATCH -e /ivi/ilps/personal/...m2m_baseline_monitor_new_large.e
+
+ pwd
+ conda info --envs
+ source /home/stan1/anaconda3/bin/activate fairseq
+
+
+ fairseq-train fairseq-data-bin-sharded/shard0:fairseq-data-bin-sharded/shard1:fairseq-data-bin-sharded/shard2:fairseq-data-bin-sharded/shard3:fairseq-data-bin-sharded/shard4 \
+     --langs en,de,nl,sv,da,is,af,lb,no,fr,es,it,pt,ro,oc,ast,ca,ru,cs,pl,bg,uk,sr,be,bs,hi,bn,kn,mr,sd,gu,ne,ur,ar,he,ha,mt,ti,am,kab,so \
+     --lang-pairs en-de,en-nl,en-sv,en-da,en-is,en-af,en-lb,en-no,en-fr,en-es,en-it,en-pt,en-ro,en-oc,en-ast,en-ca,en-ru,en-cs,en-pl,en-bg,en-uk,en-sr,en-be,en-bs,en-hi,en-bn,en-kn,en-mr,en-sd,en-gu,en-ne,en-ur,en-ar,en-he,en-ha,en-mt,en-ti,en-am,en-kab,en-so,de-en,nl-en,sv-en,da-en,is-en,af-en,lb-en,no-en,fr-en,es-en,it-en,pt-en,ro-en,oc-en,ast-en,ca-en,ru-en,cs-en,pl-en,bg-en,uk-en,sr-en,be-en,bs-en,hi-en,bn-en,kn-en,mr-en,sd-en,gu-en,ne-en,ur-en,ar-en,he-en,ha-en,mt-en,ti-en,am-en,kab-en,so-en \
+     --encoder-langtok tgt \
+     --arch transformer_vaswani_wmt_en_de_big \
+     --encoder-normalize-before --decoder-normalize-before --layernorm-embedding \
+     --encoder-layers 12 --decoder-layers 12 \
+     --sampling-method temperature --sampling-temperature 5 \
+     --criterion label_smoothed_cross_entropy --label-smoothing 0.1 \
+     --skip-invalid-size-inputs-valid-test \
+     --max-tokens 10240 --update-freq 21 --max-update 900000 \
+     --share-all-embeddings \
+     --max-source-positions 256 --max-target-positions 256 \
+     --lr 0.0005 --lr-scheduler inverse_sqrt --warmup-updates 4000 \
+     --seed 1234 --patience 10 \
+     --optimizer adam --adam-betas '(0.9, 0.98)' --adam-eps 1e-06 --weight-decay 0.0 \
+     --dropout 0.1 --attention-dropout 0.1 \
+     --fp16 --ddp-backend no_c10d \
+     --wandb-project 'EC40' \
+     --checkpoint-suffix _m2m_ --save-dir checkpoints/m2m_base_monitor_shard_new_large \
+     --save-interval-updates 2000 --keep-interval-updates 5 --no-epoch-checkpoints --log-interval 100 \
+     --distributed-world-size 4 --distributed-num-procs 44 --ddp-comm-hook fp16
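Two details of this command are easy to miss. fairseq treats the colon-separated data argument as a list of shard directories and cycles through them across epochs, which is presumably why the data-preparation script above recommends copying the whole eval set into every shard. And the effective batch size is roughly max-tokens x update-freq x number of GPUs; a rough back-of-the-envelope check (assuming every GPU fills its 10240-token budget each step):

# ~10240 tokens/GPU * 21 accumulated steps * 4 GPUs per optimizer update
echo $((10240 * 21 * 4))   # prints 860160, i.e. roughly 0.86M tokens per update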