aconeil committed
Commit 69e130e · verified · 1 Parent(s): 1e99c64

Upload tokenizer

Files changed (3):
  1. added_tokens.json +2 -2
  2. tokenizer_config.json +4 -5
  3. vocab.json +3 -4
added_tokens.json CHANGED
@@ -1,4 +1,4 @@
 {
-  "</s>": 33,
-  "<s>": 32
+  "</s>": 32,
+  "<s>": 31
 }
tokenizer_config.json CHANGED
@@ -1,6 +1,6 @@
 {
   "added_tokens_decoder": {
-    "30": {
+    "29": {
       "content": "[UNK]",
       "lstrip": true,
       "normalized": false,
@@ -8,7 +8,7 @@
       "single_word": false,
       "special": false
     },
-    "31": {
+    "30": {
       "content": "[PAD]",
       "lstrip": true,
       "normalized": false,
@@ -16,7 +16,7 @@
       "single_word": false,
       "special": false
     },
-    "32": {
+    "31": {
       "content": "<s>",
       "lstrip": false,
       "normalized": false,
@@ -24,7 +24,7 @@
       "single_word": false,
       "special": true
     },
-    "33": {
+    "32": {
       "content": "</s>",
       "lstrip": false,
       "normalized": false,
@@ -40,7 +40,6 @@
   "extra_special_tokens": {},
   "model_max_length": 1000000000000000019884624838656,
   "pad_token": "[PAD]",
-  "processor_class": "Wav2Vec2BertProcessor",
   "replace_word_delimiter_char": " ",
   "target_lang": null,
   "tokenizer_class": "Wav2Vec2CTCTokenizer",
vocab.json CHANGED
@@ -1,7 +1,7 @@
 {
   "[": 1,
-  "[PAD]": 31,
-  "[UNK]": 30,
+  "[PAD]": 30,
+  "[UNK]": 29,
   "]": 2,
   "a": 3,
   "b": 4,
@@ -29,6 +29,5 @@
   "x": 26,
   "y": 27,
   "z": 28,
-  "|": 0,
-  "ạ": 29
+  "|": 0
 }
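
The three diffs above are consistent: dropping the "ạ" entry from vocab.json frees id 29, so "[UNK]" and "[PAD]" shift down by one and the "<s>"/"</s>" entries in added_tokens.json follow. Below is a minimal sanity-check sketch under that reading (not part of the commit); "tokenizer_dir" is a placeholder for a local directory holding this revision's vocab.json, added_tokens.json, and tokenizer_config.json.

import json
from transformers import Wav2Vec2CTCTokenizer

# "tokenizer_dir" is a hypothetical local copy of this revision's files.
with open("tokenizer_dir/vocab.json", encoding="utf-8") as f:
    vocab = json.load(f)
with open("tokenizer_dir/added_tokens.json", encoding="utf-8") as f:
    added = json.load(f)

# Removing the "ạ" entry shifts [UNK]/[PAD] down by one id, and the
# <s>/</s> added tokens follow.
assert "ạ" not in vocab
assert vocab["[UNK]"] == 29 and vocab["[PAD]"] == 30  # were 30 and 31
assert added["<s>"] == 31 and added["</s>"] == 32     # were 32 and 33

# The same files load with the class named in tokenizer_config.json:
tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("tokenizer_dir")
assert tokenizer.pad_token_id == vocab["[PAD]"]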