Commit 5dc994e
Parent(s): e4b9be7
Upload 12 files

Files changed:
- .gitattributes +2 -0
- vakyansh-wav2vec2-hindi-him-4200/.gitattributes +19 -0
- vakyansh-wav2vec2-hindi-him-4200/README.md +138 -0
- vakyansh-wav2vec2-hindi-him-4200/alphabet.json +1 -0
- vakyansh-wav2vec2-hindi-him-4200/config.json +81 -0
- vakyansh-wav2vec2-hindi-him-4200/language_model/attrs.json +1 -0
- vakyansh-wav2vec2-hindi-him-4200/language_model/kenLM.arpa +3 -0
- vakyansh-wav2vec2-hindi-him-4200/language_model/unigrams.txt +3 -0
- vakyansh-wav2vec2-hindi-him-4200/preprocessor_config.json +10 -0
- vakyansh-wav2vec2-hindi-him-4200/pytorch_model.bin +3 -0
- vakyansh-wav2vec2-hindi-him-4200/special_tokens_map.json +1 -0
- vakyansh-wav2vec2-hindi-him-4200/tokenizer_config.json +1 -0
- vakyansh-wav2vec2-hindi-him-4200/vocab.json +1 -0
.gitattributes
CHANGED
@@ -32,3 +32,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+vakyansh-wav2vec2-hindi-him-4200/language_model/kenLM.arpa filter=lfs diff=lfs merge=lfs -text
+vakyansh-wav2vec2-hindi-him-4200/language_model/unigrams.txt filter=lfs diff=lfs merge=lfs -text
vakyansh-wav2vec2-hindi-him-4200/.gitattributes
ADDED
@@ -0,0 +1,19 @@
+*.bin.* filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tar.gz filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+language_model/kenLM.arpa filter=lfs diff=lfs merge=lfs -text
+language_model/unigrams.txt filter=lfs diff=lfs merge=lfs -text
vakyansh-wav2vec2-hindi-him-4200/README.md
ADDED
@@ -0,0 +1,138 @@
+---
+language: hi
+#datasets:
+#- Interspeech 2021
+metrics:
+- wer
+tags:
+- audio
+- automatic-speech-recognition
+- speech
+license: mit
+model-index:
+- name: Wav2Vec2 Vakyansh Hindi Model by Harveen Chadha
+  results:
+  - task:
+      name: Speech Recognition
+      type: automatic-speech-recognition
+    dataset:
+      name: Common Voice hi
+      type: common_voice
+      args: hi
+    metrics:
+    - name: Test WER
+      type: wer
+      value: 33.17
+---
+
+## Spaces Demo
+Check the Spaces demo [here](https://huggingface.co/spaces/Harveenchadha/wav2vec2-vakyansh-hindi/tree/main).
+
+## Pretrained Model
+
+Fine-tuned from the multilingual pretrained model [CLSRIL-23](https://arxiv.org/abs/2107.07402). The original fairseq checkpoint is available [here](https://github.com/Open-Speech-EkStep/vakyansh-models). When using this model, make sure that your speech input is sampled at 16 kHz.
+
+**Note: The results from this model are reported without a language model, so you may see a higher WER in some cases.**
+
+## Dataset
+
+This model was trained on 4200 hours of labelled Hindi data. The labelled data is not in the public domain as of now.
+
+## Training Script
+
+Models were trained using the experimental platform set up by the Vakyansh team at EkStep. Here is the [training repository](https://github.com/Open-Speech-EkStep/vakyansh-wav2vec2-experimentation).
+
+In case you want to explore the training logs on wandb, they are [here](https://wandb.ai/harveenchadha/hindi_finetuning_multilingual?workspace=user-harveenchadha).
+
+## [Colab Demo](https://colab.research.google.com/github/harveenchadha/bol/blob/main/demos/hf/hindi/hf_hindi_him_4200_demo.ipynb)
+
+## Usage
+
+The model can be used directly (without a language model) as follows:
+
+```python
+import soundfile as sf
+import torch
+from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
+
+def parse_transcription(wav_file):
+    # load pretrained model and processor
+    processor = Wav2Vec2Processor.from_pretrained("Harveenchadha/vakyansh-wav2vec2-hindi-him-4200")
+    model = Wav2Vec2ForCTC.from_pretrained("Harveenchadha/vakyansh-wav2vec2-hindi-him-4200")
+
+    # load audio (must be sampled at 16 kHz)
+    audio_input, sample_rate = sf.read(wav_file)
+
+    # pad input values and return a PyTorch tensor
+    input_values = processor(audio_input, sampling_rate=sample_rate, return_tensors="pt").input_values
+
+    # inference: retrieve logits and take the argmax over the vocabulary
+    logits = model(input_values).logits
+    predicted_ids = torch.argmax(logits, dim=-1)
+
+    # transcribe
+    transcription = processor.decode(predicted_ids[0], skip_special_tokens=True)
+    print(transcription)
+```
+
+
+## Evaluation
+The model can be evaluated as follows on the Hindi test data of Common Voice:
+
+```python
+import re
+
+import torch
+import torchaudio
+from datasets import load_dataset, load_metric
+from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
+
+test_dataset = load_dataset("common_voice", "hi", split="test")
+wer = load_metric("wer")
+
+processor = Wav2Vec2Processor.from_pretrained("Harveenchadha/vakyansh-wav2vec2-hindi-him-4200")
+model = Wav2Vec2ForCTC.from_pretrained("Harveenchadha/vakyansh-wav2vec2-hindi-him-4200")
+model.to("cuda")
+
+# Common Voice audio is 48 kHz; the model expects 16 kHz
+resampler = torchaudio.transforms.Resample(48_000, 16_000)
+
+chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“]'
+
+# Preprocessing the datasets:
+# read the audio files as arrays and normalize the transcripts.
+def speech_file_to_array_fn(batch):
+    batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower()
+    speech_array, sampling_rate = torchaudio.load(batch["path"])
+    batch["speech"] = resampler(speech_array).squeeze().numpy()
+    return batch
+
+test_dataset = test_dataset.map(speech_file_to_array_fn)
+
+# Run batched inference and collect the predicted transcriptions.
+def evaluate(batch):
+    inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
+
+    with torch.no_grad():
+        logits = model(inputs.input_values.to("cuda")).logits
+
+    pred_ids = torch.argmax(logits, dim=-1)
+    batch["pred_strings"] = processor.batch_decode(pred_ids, skip_special_tokens=True)
+    return batch
+
+result = test_dataset.map(evaluate, batched=True, batch_size=8)
+
+print("WER: {:.2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"])))
+```
+
+**Test Result**: 33.17 %
+
+[**Colab Evaluation**](https://colab.research.google.com/github/harveenchadha/bol/blob/main/demos/hf/hindi/hf_vakyansh_hindi_him_4200_evaluation_common_voice.ipynb)
+
+## Credits
+Thanks to the EkStep Foundation for making this possible. The Vakyansh team will be open-sourcing speech models in all the Indic languages.
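The note in the README reports WER without a language model, but this commit also ships alphabet.json plus language_model/kenLM.arpa, language_model/unigrams.txt, and language_model/attrs.json (alpha=0.5, beta=1.5), which is the layout transformers' Wav2Vec2ProcessorWithLM expects. A minimal, unofficial sketch of LM-boosted decoding, assuming the published repo keeps this layout and that pyctcdecode and kenlm are installed:

```python
import soundfile as sf
import torch
from transformers import Wav2Vec2ForCTC, Wav2Vec2ProcessorWithLM

# Assumption: the published model repo exposes alphabet.json and language_model/
# at the top level, as this commit's folder layout suggests.
model_id = "Harveenchadha/vakyansh-wav2vec2-hindi-him-4200"
processor = Wav2Vec2ProcessorWithLM.from_pretrained(model_id)
model = Wav2Vec2ForCTC.from_pretrained(model_id)

audio, sample_rate = sf.read("speech_16khz.wav")  # hypothetical 16 kHz mono file
inputs = processor(audio, sampling_rate=sample_rate, return_tensors="pt")

with torch.no_grad():
    logits = model(inputs.input_values).logits

# With an LM, batch_decode takes the raw logits (not argmax ids) and runs
# beam search against the shipped KenLM model.
print(processor.batch_decode(logits.numpy()).text[0])
```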
vakyansh-wav2vec2-hindi-him-4200/alphabet.json
ADDED
@@ -0,0 +1 @@
+{"labels": ["<s>", "", "</s>", "\u2047", " ", "\u0901", "\u0902", "\u0903", "\u0905", "\u0906", "\u0907", "\u0908", "\u0909", "\u090a", "\u090b", "\u090f", "\u0910", "\u0911", "\u0913", "\u0914", "\u0915", "\u0916", "\u0917", "\u0918", "\u0919", "\u091a", "\u091b", "\u091c", "\u091d", "\u091e", "\u091f", "\u0920", "\u0921", "\u0922", "\u0923", "\u0924", "\u0925", "\u0926", "\u0927", "\u0928", "\u092a", "\u092b", "\u092c", "\u092d", "\u092e", "\u092f", "\u0930", "\u0932", "\u0935", "\u0936", "\u0937", "\u0938", "\u0939", "\u093c", "\u093e", "\u093f", "\u0940", "\u0941", "\u0942", "\u0943", "\u0945", "\u0947", "\u0948", "\u0949", "\u094b", "\u094c", "\u094d"], "is_bpe": false}
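The labels above are JSON-escaped Devanagari codepoints. A quick way to inspect them in readable form, assuming a local copy of alphabet.json:

```python
import json

with open("alphabet.json", encoding="utf-8") as f:
    alphabet = json.load(f)

# Entries 0-4 are special symbols; the rest are Devanagari characters.
print(" ".join(alphabet["labels"][5:]))
```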
vakyansh-wav2vec2-hindi-him-4200/config.json
ADDED
@@ -0,0 +1,81 @@
+{
+  "activation_dropout": 0.1,
+  "apply_spec_augment": true,
+  "architectures": [
+    "Wav2Vec2ForCTC"
+  ],
+  "attention_dropout": 0.1,
+  "bos_token": "<s>",
+  "bos_token_id": 0,
+  "codevector_dim": 256,
+  "contrastive_logits_temperature": 0.1,
+  "conv_bias": false,
+  "conv_dim": [
+    512,
+    512,
+    512,
+    512,
+    512,
+    512,
+    512
+  ],
+  "conv_kernel": [
+    10,
+    3,
+    3,
+    3,
+    3,
+    2,
+    2
+  ],
+  "conv_stride": [
+    5,
+    2,
+    2,
+    2,
+    2,
+    2,
+    2
+  ],
+  "ctc_loss_reduction": "sum",
+  "ctc_zero_infinity": false,
+  "diversity_loss_weight": 0.1,
+  "do_lower_case": false,
+  "do_stable_layer_norm": false,
+  "eos_token": "</s>",
+  "eos_token_id": 2,
+  "feat_extract_activation": "gelu",
+  "feat_extract_norm": "group",
+  "feat_proj_dropout": 0.1,
+  "feat_quantizer_dropout": 0.0,
+  "final_dropout": 0.1,
+  "gradient_checkpointing": false,
+  "hidden_act": "gelu",
+  "hidden_dropout": 0.1,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-05,
+  "layerdrop": 0.1,
+  "mask_feature_length": 10,
+  "mask_feature_prob": 0.0,
+  "mask_time_length": 10,
+  "mask_time_prob": 0.05,
+  "model_type": "wav2vec2",
+  "num_attention_heads": 12,
+  "num_codevector_groups": 2,
+  "num_codevectors_per_group": 320,
+  "num_conv_pos_embedding_groups": 16,
+  "num_conv_pos_embeddings": 128,
+  "num_feat_extract_layers": 7,
+  "num_hidden_layers": 12,
+  "num_negatives": 100,
+  "pad_token": "[PAD]",
+  "pad_token_id": 1,
+  "proj_codevector_dim": 256,
+  "torch_dtype": "float32",
+  "transformers_version": "4.9.1",
+  "unk_token": "[UNK]",
+  "vocab_size": 67,
+  "word_delimiter_token": "|"
+}
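The conv_kernel and conv_stride lists above fix the feature extractor's frame rate. Since the strides multiply out to 320, the encoder emits one CTC step per 320 input samples, i.e. every 20 ms at 16 kHz; a quick check:

```python
from math import prod

conv_stride = [5, 2, 2, 2, 2, 2, 2]  # from config.json above
total_stride = prod(conv_stride)

print(total_stride)          # 320 input samples per output frame
print(16000 / total_stride)  # 50.0 frames per second -> 20 ms per CTC step
```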
vakyansh-wav2vec2-hindi-him-4200/language_model/attrs.json
ADDED
@@ -0,0 +1 @@
+{"alpha": 0.5, "beta": 1.5, "unk_score_offset": -10.0, "score_boundary": true}
vakyansh-wav2vec2-hindi-him-4200/language_model/kenLM.arpa
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ec083e80a037ea5e4239974b2daae368ade0f89e9477f075cc0e15e0a77eb87c
+size 2008446585
vakyansh-wav2vec2-hindi-him-4200/language_model/unigrams.txt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7d5d4d03ad1c72ebc85c930a5fa7d5e0730a6d352a633f0fd0e5d93760053338
+size 11053241
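Both language-model files are committed as Git LFS pointers; the ARPA file itself is about 2 GB. A sketch of fetching just these files with huggingface_hub, assuming the published repo uses the same paths:

```python
from huggingface_hub import hf_hub_download

repo_id = "Harveenchadha/vakyansh-wav2vec2-hindi-him-4200"  # assumed repo id
arpa = hf_hub_download(repo_id=repo_id, filename="language_model/kenLM.arpa")
unigrams = hf_hub_download(repo_id=repo_id, filename="language_model/unigrams.txt")
print(arpa, unigrams)  # local cache paths of the resolved LFS files
```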
vakyansh-wav2vec2-hindi-him-4200/preprocessor_config.json
ADDED
@@ -0,0 +1,10 @@
+{
+  "do_normalize": true,
+  "feature_extractor_type": "Wav2Vec2FeatureExtractor",
+  "feature_size": 1,
+  "padding_side": "right",
+  "padding_value": 0,
+  "return_attention_mask": false,
+  "sampling_rate": 16000,
+  "processor_class": "Wav2Vec2ProcessorWithLM"
+}
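For reference, this is the config that Wav2Vec2FeatureExtractor.from_pretrained consumes: do_normalize applies zero-mean/unit-variance scaling to the waveform, and return_attention_mask=false matches the group-normalized architecture in config.json. A small sketch of using it:

```python
import numpy as np
from transformers import Wav2Vec2FeatureExtractor

extractor = Wav2Vec2FeatureExtractor.from_pretrained(
    "Harveenchadha/vakyansh-wav2vec2-hindi-him-4200"
)

audio = np.random.randn(16000).astype(np.float32)  # one second of noise at 16 kHz
# Passing a sampling_rate other than 16000 raises a ValueError.
features = extractor(audio, sampling_rate=16000, return_tensors="pt")
print(features.input_values.shape)  # torch.Size([1, 16000])
```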
vakyansh-wav2vec2-hindi-him-4200/pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9bb46884e079e83cdc30e45b931f35f45bda2f4b74bf82fed1d2630ca25603a0
+size 377774679
vakyansh-wav2vec2-hindi-him-4200/special_tokens_map.json
ADDED
@@ -0,0 +1 @@
+{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
vakyansh-wav2vec2-hindi-him-4200/tokenizer_config.json
ADDED
@@ -0,0 +1 @@
+{"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "<pad>", "do_lower_case": false, "word_delimiter_token": "|", "tokenizer_class": "Wav2Vec2CTCTokenizer"}
vakyansh-wav2vec2-hindi-him-4200/vocab.json
ADDED
@@ -0,0 +1 @@
+{"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "|": 4, "ँ": 5, "ं": 6, "ः": 7, "अ": 8, "आ": 9, "इ": 10, "ई": 11, "उ": 12, "ऊ": 13, "ऋ": 14, "ए": 15, "ऐ": 16, "ऑ": 17, "ओ": 18, "औ": 19, "क": 20, "ख": 21, "ग": 22, "घ": 23, "ङ": 24, "च": 25, "छ": 26, "ज": 27, "झ": 28, "ञ": 29, "ट": 30, "ठ": 31, "ड": 32, "ढ": 33, "ण": 34, "त": 35, "थ": 36, "द": 37, "ध": 38, "न": 39, "प": 40, "फ": 41, "ब": 42, "भ": 43, "म": 44, "य": 45, "र": 46, "ल": 47, "व": 48, "श": 49, "ष": 50, "स": 51, "ह": 52, "़": 53, "ा": 54, "ि": 55, "ी": 56, "ु": 57, "ू": 58, "ृ": 59, "ॅ": 60, "े": 61, "ै": 62, "ॉ": 63, "ो": 64, "ौ": 65, "्": 66}
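The vocab holds 67 entries, matching vocab_size in config.json: four special tokens, the word delimiter "|", and the Devanagari characters and matras. A sketch of building the CTC tokenizer from a local copy of this file (hypothetical path):

```python
from transformers import Wav2Vec2CTCTokenizer

tokenizer = Wav2Vec2CTCTokenizer(
    "vocab.json",  # local copy of the file above
    unk_token="<unk>",
    pad_token="<pad>",
    bos_token="<s>",
    eos_token="</s>",
    word_delimiter_token="|",
)

# Ids taken from the mapping above; should print "नमस्ते".
print(tokenizer.decode([39, 44, 51, 66, 35, 61]))
```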