updated my model
- config.json +8 -8
- pytorch_model.bin +2 -2
- vocab.json +1 -1
config.json CHANGED
@@ -1,11 +1,11 @@
 {
   "_name_or_path": "facebook/wav2vec2-large-xlsr-53",
-  "activation_dropout": 0.
+  "activation_dropout": 0.055,
   "apply_spec_augment": true,
   "architectures": [
     "Wav2Vec2ForCTC"
   ],
-  "attention_dropout": 0.
+  "attention_dropout": 0.094,
   "bos_token_id": 1,
   "conv_bias": true,
   "conv_dim": [
@@ -42,16 +42,16 @@
   "feat_extract_activation": "gelu",
   "feat_extract_dropout": 0.0,
   "feat_extract_norm": "layer",
-  "feat_proj_dropout": 0.
+  "feat_proj_dropout": 0.04,
   "final_dropout": 0.0,
   "gradient_checkpointing": true,
   "hidden_act": "gelu",
-  "hidden_dropout": 0.
+  "hidden_dropout": 0.047,
   "hidden_size": 1024,
   "initializer_range": 0.02,
   "intermediate_size": 4096,
   "layer_norm_eps": 1e-05,
-  "layerdrop": 0.
+  "layerdrop": 0.041,
   "mask_channel_length": 10,
   "mask_channel_min_space": 1,
   "mask_channel_other": 0.0,
@@ -62,7 +62,7 @@
   "mask_time_length": 10,
   "mask_time_min_space": 1,
   "mask_time_other": 0.0,
-  "mask_time_prob": 0.
+  "mask_time_prob": 0.082,
   "mask_time_selection": "static",
   "model_type": "wav2vec2",
   "num_attention_heads": 16,
@@ -70,7 +70,7 @@
   "num_conv_pos_embeddings": 128,
   "num_feat_extract_layers": 7,
   "num_hidden_layers": 24,
-  "pad_token_id":
+  "pad_token_id": 49,
   "transformers_version": "4.4.0",
-  "vocab_size":
+  "vocab_size": 50
 }
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:f3a226cc02178fad03b0e8311c290e48dd677d144e66b148c221e43b4e104617
+size 1262138839
vocab.json CHANGED
@@ -1 +1 @@
-{"
+{"ف": 0, "ی": 1, "إ": 2, "ا": 3, "ز": 4, "ء": 5, "ک": 6, "م": 7, "ص": 8, "ع": 10, "ؤ": 11, "ذ": 12, "ج": 13, "ٰ": 14, "ك": 15, "ش": 16, "ت": 17, "ه": 18, "غ": 19, "آ": 20, "ض": 21, "ظ": 22, "ل": 23, "ث": 24, "ٌ": 25, "ي": 26, "أ": 27, "ِ": 28, "ر": 29, "ْ": 30, "ة": 31, "ى": 32, "د": 33, "ُ": 34, "ً": 35, "و": 36, "ق": 37, "خ": 38, "ن": 39, "ط": 40, "ح": 41, "ئ": 42, "ّ": 43, "س": 44, "ب": 45, "ٍ": 46, "َ": 47, "|": 9, "[UNK]": 48, "[PAD]": 49}
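The new vocab.json is a character-level vocabulary over Arabic script plus "|" (id 9) as the word delimiter, "[UNK]" (48) and "[PAD]" (49), 50 entries in total, which matches vocab_size and pad_token_id in config.json. A minimal sketch, assuming the file above is saved locally as vocab.json, of building the CTC tokenizer from it:

# Minimal sketch, not part of this commit, assuming vocab.json is saved locally.
from transformers import Wav2Vec2CTCTokenizer

tokenizer = Wav2Vec2CTCTokenizer(
    "vocab.json",
    unk_token="[UNK]",
    pad_token="[PAD]",
    word_delimiter_token="|",
)
print(tokenizer.vocab_size)    # 50, matches "vocab_size" in config.json
print(tokenizer.pad_token_id)  # 49, matches "pad_token_id" in config.json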