de-francophones commited on
Commit
3de864c
·
verified ·
1 Parent(s): da43c74

Upload 38 files

Browse files
Files changed (39) hide show
  1. .gitattributes +29 -0
  2. README.md +316 -0
  3. human_eval/results.json +0 -0
  4. human_eval/results_detailed.json +3 -0
  5. monolingual/en.fragments +3 -0
  6. monolingual/en.sents.det +3 -0
  7. monolingual/en.sents.tok +3 -0
  8. multilingual/en-de.fragments.json +3 -0
  9. multilingual/en-de.intersect.json +3 -0
  10. multilingual/en-de.sents.json +3 -0
  11. multilingual/en-es.fragments.json +3 -0
  12. multilingual/en-es.intersect.json +3 -0
  13. multilingual/en-es.sents.json +3 -0
  14. multilingual/en-fr.fragments.json +3 -0
  15. multilingual/en-fr.intersect.json +3 -0
  16. multilingual/en-fr.sents.json +3 -0
  17. multilingual/en-pt_br.fragments.json +3 -0
  18. multilingual/en-pt_br.intersect.json +3 -0
  19. multilingual/en-pt_br.sents.json +3 -0
  20. multilingual/images.json +3 -0
  21. tasks/fill_in_the_blank/intersect.json +3 -0
  22. tasks/fill_in_the_blank/sents.json +3 -0
  23. tasks/fill_in_the_blank/splits.json +3 -0
  24. tasks/lexical_translation/en-de.dict.json +0 -0
  25. tasks/lexical_translation/en-de.intersect.json +0 -0
  26. tasks/lexical_translation/en-de.sents.json +3 -0
  27. tasks/lexical_translation/en-de.splits.json +0 -0
  28. tasks/lexical_translation/en-es.dict.json +0 -0
  29. tasks/lexical_translation/en-es.intersect.json +3 -0
  30. tasks/lexical_translation/en-es.sents.json +3 -0
  31. tasks/lexical_translation/en-es.splits.json +3 -0
  32. tasks/lexical_translation/en-fr.dict.json +0 -0
  33. tasks/lexical_translation/en-fr.intersect.json +0 -0
  34. tasks/lexical_translation/en-fr.sents.json +3 -0
  35. tasks/lexical_translation/en-fr.splits.json +3 -0
  36. tasks/lexical_translation/en-pt_br.dict.json +0 -0
  37. tasks/lexical_translation/en-pt_br.intersect.json +3 -0
  38. tasks/lexical_translation/en-pt_br.sents.json +3 -0
  39. tasks/lexical_translation/en-pt_br.splits.json +3 -0
.gitattributes CHANGED
@@ -56,3 +56,32 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
56
  # Video files - compressed
57
  *.mp4 filter=lfs diff=lfs merge=lfs -text
58
  *.webm filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
56
  # Video files - compressed
57
  *.mp4 filter=lfs diff=lfs merge=lfs -text
58
  *.webm filter=lfs diff=lfs merge=lfs -text
59
+ human_eval/results_detailed.json filter=lfs diff=lfs merge=lfs -text
60
+ monolingual/en.fragments filter=lfs diff=lfs merge=lfs -text
61
+ monolingual/en.sents.det filter=lfs diff=lfs merge=lfs -text
62
+ monolingual/en.sents.tok filter=lfs diff=lfs merge=lfs -text
63
+ multilingual/en-de.fragments.json filter=lfs diff=lfs merge=lfs -text
64
+ multilingual/en-de.intersect.json filter=lfs diff=lfs merge=lfs -text
65
+ multilingual/en-de.sents.json filter=lfs diff=lfs merge=lfs -text
66
+ multilingual/en-es.fragments.json filter=lfs diff=lfs merge=lfs -text
67
+ multilingual/en-es.intersect.json filter=lfs diff=lfs merge=lfs -text
68
+ multilingual/en-es.sents.json filter=lfs diff=lfs merge=lfs -text
69
+ multilingual/en-fr.fragments.json filter=lfs diff=lfs merge=lfs -text
70
+ multilingual/en-fr.intersect.json filter=lfs diff=lfs merge=lfs -text
71
+ multilingual/en-fr.sents.json filter=lfs diff=lfs merge=lfs -text
72
+ multilingual/en-pt_br.fragments.json filter=lfs diff=lfs merge=lfs -text
73
+ multilingual/en-pt_br.intersect.json filter=lfs diff=lfs merge=lfs -text
74
+ multilingual/en-pt_br.sents.json filter=lfs diff=lfs merge=lfs -text
75
+ multilingual/images.json filter=lfs diff=lfs merge=lfs -text
76
+ tasks/fill_in_the_blank/intersect.json filter=lfs diff=lfs merge=lfs -text
77
+ tasks/fill_in_the_blank/sents.json filter=lfs diff=lfs merge=lfs -text
78
+ tasks/fill_in_the_blank/splits.json filter=lfs diff=lfs merge=lfs -text
79
+ tasks/lexical_translation/en-de.sents.json filter=lfs diff=lfs merge=lfs -text
80
+ tasks/lexical_translation/en-es.intersect.json filter=lfs diff=lfs merge=lfs -text
81
+ tasks/lexical_translation/en-es.sents.json filter=lfs diff=lfs merge=lfs -text
82
+ tasks/lexical_translation/en-es.splits.json filter=lfs diff=lfs merge=lfs -text
83
+ tasks/lexical_translation/en-fr.sents.json filter=lfs diff=lfs merge=lfs -text
84
+ tasks/lexical_translation/en-fr.splits.json filter=lfs diff=lfs merge=lfs -text
85
+ tasks/lexical_translation/en-pt_br.intersect.json filter=lfs diff=lfs merge=lfs -text
86
+ tasks/lexical_translation/en-pt_br.sents.json filter=lfs diff=lfs merge=lfs -text
87
+ tasks/lexical_translation/en-pt_br.splits.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,316 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # `MultiSubs`: A Large-scale Multimodal and Multilingual Dataset
2
+
3
+
4
+ ## Version history
5
+
6
+ | Version | Description |
7
+ | ------------ | ------------------- |
8
+ | 2021.06 | First release |
9
+
10
+
11
+ ## Introduction
12
+
13
+ `MultiSubs` is a dataset of multilingual subtitles gathered from [the OPUS OpenSubtitles dataset](https://opus.nlpl.eu/OpenSubtitles.php), which in turn was sourced from [opensubtitles.org](http://www.opensubtitles.org/).
14
+ We have supplemented some text fragments (visually salient nouns for now) of the subtitles with web images.
15
+
16
+ Please refer to the paper below for a more detailed description of the dataset.
17
+
18
+
19
+ ## Citation
20
+
21
+ I hope that you do something useful and impactful with this dataset to really move the field forward, and not just publish papers for the sake of it.
22
+
23
+ Please cite the following paper if you use this dataset in your work:
24
+
25
+ Josiah Wang, Pranava Madhyastha, Josiel Figueiredo, Chiraag Lala, Lucia Specia (2021). MultiSubs: A Large-scale Multimodal and Multilingual Dataset. CoRR, abs/2103.01910. Available at: https://arxiv.org/abs/2103.01910
26
+
27
+ ```bibtex
28
+ @article{DBLP:journals/corr/abs-2103-01910,
29
+ author = {Josiah Wang and
30
+ Pranava Madhyastha and
31
+ Josiel Figueiredo and
32
+ Chiraag Lala and
33
+ Lucia Specia},
34
+ title = {MultiSubs: {A} Large-scale Multimodal and Multilingual Dataset},
35
+ journal = {CoRR},
36
+ volume = {abs/2103.01910},
37
+ year = {2021},
38
+ url = {https://arxiv.org/abs/2103.01910},
39
+ archivePrefix = {arXiv},
40
+ eprint = {2103.01910},
41
+ timestamp = {Thu, 04 Mar 2021 17:00:40 +0100},
42
+ biburl = {https://dblp.org/rec/journals/corr/abs-2103-01910.bib},
43
+ bibsource = {dblp computer science bibliography, https://dblp.org}
44
+ }
45
+ ```
46
+
47
+
48
+ ## Disclaimer
49
+
50
+ The MultiSubs dataset is provided as is.
51
+ As the dataset relies on external sources such as movie subtitles and BabelNet, the corpus itself may contain offensive language and images, such as curse words and images depicting nudity or sexual organs, among others.
52
+ While these are kept to a minimum, they inevitably exist.
53
+
54
+
55
+ ## Content
56
+
57
+ ### `monolingual/`
58
+
59
+ Contains the original English dataset as described in paragraph 1 of Section 3 in the paper.
60
+
61
+
62
+ ##### `en.sents.tok` and `en.sents.det`
63
+ Tokenised and detokenised versions of English sentences, each containing at least one occurrence of a fragment from `en.fragments` below.
64
+
65
+ Format: `sentence-id \t sentence` where `sentence-id` is `imdb-id#sentence-number`.
66
+
67
+ There are 10,241,617 English sentences (paragraph 1 of Section 3 in the paper).
68
+
69
+
70
+ ##### `en.fragments`
71
+
72
+ List of fragments extracted from English sentences that are (1) single word nouns; (2) have an imageability score of at least 500; (3) occur more than once.
73
+ There are 13,268,536 fragment instances and 4,099 unique tokens. See paragraph 1 of Section 3 in the paper.
74
+
75
+ Format:
76
+ `sentence-id \t fragment-start \t fragment-end \t fragment-type \t raw-fragment \t tokenised-fragment \t POS-sequence`
77
+
78
+ e.g. `417#1 \t 2 \t 5 \t N \t Trip \t trip \t NN`
79
+
80
+ - `fragment-start` and `fragment-end` are the start/end character indices for the fragment in `en.sents.det`.
81
+ - `fragment-type` is always `N` (single word nouns) for this release.
82
+ - `POS-sequence` are the PoS tag sequence for the fragment (a single tag for single word nouns).
83
+
84
+
85
+
86
+ ### `multilingual/`
87
+
88
+ ##### `en-[es|pt_br|fr|de].sents.json`
89
+
90
+ Aligned bilingual corpus of Spanish [es], (Brazilian) Portuguese [pt_br], French [fr] and German [de] sentences respectively.
91
+
92
+ The JSON object is a dictionary, where the key is the ID of the aligned sentence, and the value is a dictionary in the following format:
93
+
94
+ ```json
95
+ {
96
+ "src": {
97
+ "det": "A detokenised sentence in the source language (English).",
98
+ "tok": "a tokenised sentence in the source language ( english ) ."
99
+ },
100
+
101
+ "trg": {
102
+ "det": "The aligned sentence in the target language, detokenised.",
103
+ "tok": "the aligned sentence in the target language , detokenised ."
104
+ }
105
+ }
106
+ ```
107
+
108
+
109
+ ##### `en-[es|pt_br|fr|de].fragments.json`
110
+
111
+ The fragments extracted from the sentences above.
112
+
113
+ The JSON object is a list, where each item in the list contains a fragment in the following format:
114
+
115
+ ```json
116
+ { "sentId": "6617#en{7}:de{14}",
117
+ "srcSentId": "6617#7",
118
+ "srcCharStart": 4,
119
+ "srcCharEnd": 6,
120
+ "srcFragment": "man",
121
+ "trgFragment": "mann",
122
+ "trgTokenIndex": 2,
123
+ "trgFragmentList": ["mann", "mensch", "männer"],
124
+ "synsets": "bn:00001533n;bn:00044576n;bn:00053096n;bn:00053097n;bn:00053099n;bn:03478581n"
125
+ }
126
+ ```
127
+
128
+ - `srcCharStart` and `srcCharEnd` are the positions of the first and last character of the fragment in the *detokenised* English sentence respectively (starts from 0).
129
+
130
+ - `trgTokenIndex` is the position of the token in the *tokenised* sentence in the target language (starts from 0).
131
+
132
+ - `trgFragmentList` is a list of plausible (sense-disambiguated) translations for this fragment in the target language
133
+
134
+ - `synsets` are the inferred babelnet synset ID(s) for the fragment. Multiple synsets are separated by semicolons. This semicolon-separated-string can be used as the ID to query all images for this fragment in `images.json` below.
135
+
136
+
137
+ ##### `images.json`
138
+
139
+ A JSON dictionary.
140
+
141
+ Key: Babelnet synset IDs separated by semicolons (see above).
142
+ Value: List of image IDs associated with this set of synset IDs.
143
+
144
+
145
+ ##### `en-[es|pt_br|fr|de].intersect.json`
146
+
147
+ A JSON dictionary. Gives the ids of sentences in each *intersect_N* subset, as described in Section 3.1 of the paper.
148
+
149
+ Key: `"intersect1"`, `"intersect2"`, `"intersect3"`, `"intersect4"`
150
+
151
+ Value: List of sentence ids for each subset. The number of sentences should correspond to those reported in Table 1 of the paper.
152
+
153
+
154
+ ### `human_eval/`
155
+
156
+ Results of Human Evaluation of the "Gap Filling Game" (Section 5 of paper)
157
+
158
+ ##### `results.json`
159
+
160
+ Format:
161
+
162
+ ```json
163
+ {
164
+ challengeId: {
165
+ "subtitleId": id,
166
+ "userId": userId,
167
+ "consensus": "intersectN",
168
+ "word": correctWord,
169
+ "correctAttempt": howManyAttempts (0 if failed after 3 attempts),
170
+ "guess1": {"word": word, "score": score},
171
+ "guess2": {"word": word, "score": score},
172
+ "guess3": {"word": word, "score": score}
173
+ }
174
+ }
175
+ ```
176
+
177
+ - `"guess2"` and `"guess3"` may be absent depending on `"correctAttempt"`.
178
+
179
+
180
+ ##### `results_detailed.json`
181
+
182
+ This version has more details, including the sentences and image(s) shown to the user. There are 16 instances missing in this version compared to `results.json` - we have lost some of the information for these instances.
183
+
184
+ Additional fields:
185
+ - `"images": ["IMAGE1", "IMAGE2", "IMAGE3", ...]`
186
+ - `"leftContext": "The left portion of the sentence before the missing word"`
187
+ - `"rightContext": "The right portion of the sentence after the missing word"`
188
+
189
+ The first image in the `"images"` list is shown in Attempt 2 (one image).
190
+
191
+
192
+ ### `tasks/fill_in_the_blank/`
193
+
194
+ Dataset for the fill-in-the-blank task (Section 6.1)
195
+
196
+ ##### `sents.json`
197
+
198
+ A JSON list. Each item in the list is a dictionary representing a sentence for the fill-in-the-blank task.
199
+
200
+ There are 4383978 sentences in this file, although not all are used (only 4377772 are used)
201
+
202
+ Blanks are marked as `<_>` in the sentences.
203
+
204
+ Format for each item:
205
+
206
+ ```json
207
+ {"sentId": "417#en{2}:pt_br{2}",
208
+ "word": "hall",
209
+ "wordLower": "hall",
210
+ "sent": {
211
+ "det": "The astronomers are assembled in a large <_> embellished with instruments.",
212
+ "tok": "the astronomers are assembled in a large <_> embellished with instruments ."
213
+ },
214
+ "synsets": "bn:00004493n;bn:00042664n",
215
+ "intersect": "intersect=1",
216
+ "imageId": "4E6B2547DF16BB40DB0036159E1CBF0BA12127752D3C447E7CE8BFB3",
217
+ "srcSentId": "417#2",
218
+ "srcCharStart": 41,
219
+ "srcCharEnd": 44
220
+ }
221
+ ```
222
+
223
+ - `wordLower` is the lowercased version of the token.
224
+ - `intersect` gives the specific subset this sentence belongs to. You can retrieve a list of sentences belonging to a subset using `intersect.json` instead (below).
225
+ - `imageId` is the image randomly selected for the sentence, but keeping the training, test and validation images disjoint. This is described at the end of Section 6.1.2 in the paper. You may use this if you are training/testing a model that takes in a single image as input, to ensure your experiments are comparable. `imageId` may be `null` if not used in the split.
226
+
227
+
228
+
229
+ ##### `intersect.json`
230
+
231
+ A JSON dictionary, containing the list of sentences each `intersect{=N}` subset contains.
232
+
233
+ Keys: `"intersect=1"`, `"intersect=2"`, `"intersect=3"`, `"intersect=4"`
234
+
235
+ Value: List of indices, pointing to the sentences in `sents.json`
236
+
237
+
238
+ ##### `splits.json`
239
+
240
+ A JSON dictionary, containing the different train/test splits.
241
+
242
+ **Keys**:
243
+ Main splits:
244
+
245
+ - `"train"` - all training (4277772 instances)
246
+ - `"val"` - all validation (5000 instances)
247
+ - `"test"` - all test (5000 instances)
248
+
249
+ Training subsets (first paragraph of Section 6.1.4 in paper)
250
+ - `"trainIntersect=1"` - training subset where intersect=1 (2499265)
251
+ - `"trainIntersect=2"` - training subset where intersect=2 (1252886)
252
+ - `"trainIntersect=3"` - training subset where intersect=3 (462860)
253
+ - `"trainIntersect=4"` - training subset where intersect=4 (62761)
254
+
255
+ Validation and test subsets (second paragraph of Section 6.1.4 in paper). The test results reported in the paper are based on this subset.
256
+ - `"valSubset"` - subset of validation (3143)
257
+ - `"testSubset"` - subset of test set (3262)
258
+
259
+
260
+ **Values**: List of indices, pointing to the sentences in `sents.json` for each split.
261
+
262
+
263
+ ### `tasks/lexical_translation/`
264
+
265
+ Dataset for the lexical translation task (Section 6.2).
266
+
267
+ ##### `en-[es|pt_br|fr|de].sents.json`
268
+
269
+ Same as the fill-in-the-blank task above, with two additional keys:
270
+ - `"target"` for the exact word in the target language.
271
+ - `"positiveTargets"` for a list of acceptable words in the target language.
272
+
273
+
274
+ ##### `en-[es|pt_br|fr|de].intersect.json`
275
+
276
+ Same as the fill-in-the-blank task above.
277
+
278
+
279
+ ##### `en-[es|pt_br|fr|de].splits.json`
280
+
281
+ Same as the fill-in-the-blank task above.
282
+
283
+ Number of instances:
284
+
285
+ `es`
286
+ - `train`: 2356787
287
+ - `val`: 5000
288
+ - `test`: 5000
289
+ - `valSubset`: 3172
290
+ - `testSubset`: 3117
291
+
292
+ `pt_br`
293
+ - `train`: 1950455
294
+ - `val`: 5000
295
+ - `test`: 5000
296
+ - `valSubset`: 3084
297
+ - `testSubset`: 3167
298
+
299
+ `fr`
300
+ - `train`: 1143608
301
+ - `val`: 5000
302
+ - `test`: 5000
303
+ - `valSubset`: 2930
304
+ - `testSubset`: 2944
305
+
306
+ `de`
307
+ - `train`: 405759
308
+ - `val`: 5000
309
+ - `test`: 5000
310
+ - `valSubset`: 3047
311
+ - `testSubset`: 3007
312
+
313
+
314
+ ### `images`
315
+
316
+ The images are available to download as a separate zip file.
human_eval/results.json ADDED
The diff for this file is too large to render. See raw diff
 
human_eval/results_detailed.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a9e82f4e55ac544f8710a4d66342323d21e8a2a73a80fa622bad5e0e1590b1a9
3
+ size 48325999
monolingual/en.fragments ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a47f4ee8d7ba959921803d9c5eaf67737e5fcdc3076a933f59ef2d2fed29a492
3
+ size 456783237
monolingual/en.sents.det ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bf0caf19dbd7001c1f7ab19ff435bf099b90b9367608de4bcb153af6c1814ec6
3
+ size 582690019
monolingual/en.sents.tok ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:919ee2200d76a9c324ee479ea674e09522f91738f991555fe25a7811f3bfe4b6
3
+ size 603446476
multilingual/en-de.fragments.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f7580c07ef0bc6f58f24d150f6ed2d0f14ca5ae1c26d7d71536e06138539c6da
3
+ size 125913578
multilingual/en-de.intersect.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1b97d5bf2f0cd66175de13afebfed092be9c1bc5fa989f395723bb04d3d5b104
3
+ size 22241054
multilingual/en-de.sents.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:61778e6a16a286716b955ac197d893f8659165f2e000a0a635c237ecd03ebf3f
3
+ size 142152546
multilingual/en-es.fragments.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d22f4d90baa6b911626d7e1849e1828586e25a389eda9db4a4ab1c953ac45bea
3
+ size 742274529
multilingual/en-es.intersect.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3b8cb83a32aae6ba456f516256ff019fc484a547e31edb4e71ca2f757148a32b
3
+ size 98758043
multilingual/en-es.sents.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e731ec91136290055a7e6480b192a95b32657a65b2789d8af08ed96be9747f18
3
+ size 744968663
multilingual/en-fr.fragments.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b4f694e1595b742bc53fb9d7c186de1b02e0f60e293c56813c3aa78af02b1b7e
3
+ size 381444033
multilingual/en-fr.intersect.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:07c887823ec60733759ce9bc2d9ff7020bce994afaccc7f4561e7135efa13557
3
+ size 56139189
multilingual/en-fr.sents.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:60859c4fe1d8f398ea03333c379c1f91926345590edb21000d32d4467951adf6
3
+ size 393213899
multilingual/en-pt_br.fragments.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cb274f47449a7e49494935ea65bc241b05c5f14a5bb7d55006bb64e666576914
3
+ size 644021125
multilingual/en-pt_br.intersect.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2fc058d0c8ffddfa8784e63385d2a41f5b054e7a55bc4301e3ca818182bbeebe
3
+ size 97728192
multilingual/en-pt_br.sents.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:06457bade0acd19e35962e8a53e6c3626b933f45e7f7e38fb5a578a4fee8c8f8
3
+ size 634820370
multilingual/images.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cb1a63b663abaf065626c7a1c5a3ae2fa08a7c1a5d9ee2db5a90424f57dad8c1
3
+ size 15446848
tasks/fill_in_the_blank/intersect.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:376ea85d9fd42689c0f116fbb4beafecf044db0a7231530188f566c36516c3fa
3
+ size 38344760
tasks/fill_in_the_blank/sents.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e86e05562919ef9e88344d97e57554d75d38629b33e2aa00708a411f71f7b37c
3
+ size 1903266538
tasks/fill_in_the_blank/splits.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a596cc8a107145705e5b002d502154507e2a9a106c11c9250a7f8a9f413efc7b
3
+ size 74967388
tasks/lexical_translation/en-de.dict.json ADDED
The diff for this file is too large to render. See raw diff
 
tasks/lexical_translation/en-de.intersect.json ADDED
The diff for this file is too large to render. See raw diff
 
tasks/lexical_translation/en-de.sents.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:83fce33d07f664964eb501ed411d321189feea481405c3ad83ef602e8f5861b1
3
+ size 201853492
tasks/lexical_translation/en-de.splits.json ADDED
The diff for this file is too large to render. See raw diff
 
tasks/lexical_translation/en-es.dict.json ADDED
The diff for this file is too large to render. See raw diff
 
tasks/lexical_translation/en-es.intersect.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4c5623105c5bc45f8d6a3ac3cfdf40cbef43fe90afc6cafeb9da277d88b86a2f
3
+ size 20504555
tasks/lexical_translation/en-es.sents.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b86cb5b1995dc1dcf0915f9b513f3eb1d13f9b11cb3febf50cf990cd9b6a9134
3
+ size 1255613873
tasks/lexical_translation/en-es.splits.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e766f630ae97f3579f03868cc62d78871d01186cbf648e78d4ba3a6bffa3adfa
3
+ size 40379183
tasks/lexical_translation/en-fr.dict.json ADDED
The diff for this file is too large to render. See raw diff
 
tasks/lexical_translation/en-fr.intersect.json ADDED
The diff for this file is too large to render. See raw diff
 
tasks/lexical_translation/en-fr.sents.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4ed30e171d14896c30cbcbb4c20643090a22507919429c686cc28d5b40f6271a
3
+ size 611272504
tasks/lexical_translation/en-fr.splits.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:de9154e8d04db4c2837d7453c96178c0e921af499b9e8da4ce31249b458526d3
3
+ size 18538343
tasks/lexical_translation/en-pt_br.dict.json ADDED
The diff for this file is too large to render. See raw diff
 
tasks/lexical_translation/en-pt_br.intersect.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a0a59f9352cf347abe1211c368774e93165a6848f83882060457a300b4771d6e
3
+ size 16761554
tasks/lexical_translation/en-pt_br.sents.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b0f5291ae9f021f6c4584dca21486914f74d01009c12c25c806d386e3cb99f68
3
+ size 1063419544
tasks/lexical_translation/en-pt_br.splits.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4c6984a5e1ead19ea26980d4a00b38a20723eeeff7d5b160de22a876bee101b6
3
+ size 33063189