ssdatar committed on
Commit 0cabfb8 · 1 Parent(s): 5982272

Training in progress, step 81500

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cc73315c011c667fae5118de18857e215a6c140faeee78bbe45de306c97057e5
-size 501023389
+oid sha256:5bffa40dd6e42b28764d59bfc7500d4a9ed9ea35d01e701ed52eadec35b03d92
+size 2240706037
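
(Note: pytorch_model.bin and training_args.bin are stored with Git LFS, so the diffs here compare small LFS pointer files rather than the binaries themselves; with the new .gitattributes rule above, tokenizer.json is now stored the same way. As a minimal sketch of what a pointer file is, the snippet below checks whether a local file is an LFS pointer or the real payload; the path is a hypothetical example, not part of this commit.)

# lfs_check.py -- distinguish a Git LFS pointer from the actual binary.
# Assumes a local clone of the repo; "pytorch_model.bin" is an example path.
def is_lfs_pointer(path: str) -> bool:
    with open(path, "rb") as f:
        head = f.read(100)
    # Pointer files are small text files that start with this spec line.
    return head.startswith(b"version https://git-lfs.github.com/spec/v1")

if __name__ == "__main__":
    print(is_lfs_pointer("pytorch_model.bin"))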
special_tokens_map.json CHANGED
@@ -1,30 +1,15 @@
 {
-  "bos_token": {
-    "content": "</s>",
-    "lstrip": false,
-    "normalized": true,
+  "bos_token": "<s>",
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "mask_token": {
+    "content": "<mask>",
+    "lstrip": true,
+    "normalized": false,
     "rstrip": false,
     "single_word": false
   },
-  "eos_token": {
-    "content": "</s>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "pad_token": {
-    "content": "<pad>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "unk_token": {
-    "content": "</s>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  }
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "unk_token": "<unk>"
 }
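
(This change replaces GPT-2-style special tokens, where "</s>" doubled as bos/eos/unk, with the standard XLM-R set. A minimal sketch of inspecting the result, assuming the checkpoint directory loads with transformers' AutoTokenizer; "./checkpoint" is a hypothetical local path.)

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./checkpoint")
# Expected per the new map: <s>, </s>, <unk>, <pad>, <mask>
print(tok.bos_token, tok.eos_token, tok.unk_token, tok.pad_token, tok.mask_token)
print(tok.cls_token, tok.sep_token)  # <s> and </s> for XLM-R-style models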
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -1,40 +1,19 @@
 {
-  "add_bos_token": true,
-  "add_prefix_space": false,
-  "bos_token": {
-    "__type": "AddedToken",
-    "content": "</s>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
+  "bos_token": "<s>",
   "clean_up_tokenization_spaces": true,
-  "eos_token": {
-    "__type": "AddedToken",
-    "content": "</s>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "errors": "replace",
-  "model_max_length": 1000000000000000019884624838656,
-  "pad_token": {
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "mask_token": {
     "__type": "AddedToken",
-    "content": "<pad>",
-    "lstrip": false,
+    "content": "<mask>",
+    "lstrip": true,
     "normalized": true,
     "rstrip": false,
     "single_word": false
   },
-  "tokenizer_class": "GPT2Tokenizer",
-  "unk_token": {
-    "__type": "AddedToken",
-    "content": "</s>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  }
+  "model_max_length": 512,
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "tokenizer_class": "XLMRobertaTokenizer",
+  "unk_token": "<unk>"
 }
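
(Two consequential changes here: model_max_length drops from the unbounded sentinel to 512, and tokenizer_class switches from GPT2Tokenizer to XLMRobertaTokenizer. A minimal sketch of the truncation effect, again assuming the hypothetical "./checkpoint" directory.)

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./checkpoint")
enc = tok("word " * 5000, truncation=True)  # truncation=True caps at model_max_length
assert len(enc["input_ids"]) <= 512
print(len(enc["input_ids"]))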
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:53313092d109a391e0898b0bee6844ea392965c8c5bf148cfbb0c514976d9e88
+oid sha256:5008108e53cdb8e7f240c97ba92b03b41d129ae0ed84af3900966084794ad7c8
 size 4027