suhas-hegde5 committed
Commit 0c65694 · verified · 1 parent: c00f17c

End of training

README.md CHANGED
@@ -3,6 +3,8 @@ library_name: transformers
 license: apache-2.0
 base_model: facebook/wav2vec2-large-lv60
 tags:
+- automatic-speech-recognition
+- librispeech_asr
 - generated_from_trainer
 datasets:
 - librispeech_asr
@@ -15,15 +17,15 @@ model-index:
       name: Automatic Speech Recognition
       type: automatic-speech-recognition
     dataset:
-      name: librispeech_asr
+      name: LIBRISPEECH_ASR - CLEAN
       type: librispeech_asr
       config: clean
       split: test
-      args: clean
+      args: 'Config: clean, Training split: test, Eval split: test'
     metrics:
     - name: Wer
      type: wer
-      value: 1.0237341772151898
+      value: 1.0225474683544304
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -31,10 +33,10 @@ should probably proofread and complete it, then remove this comment. -->
 
 # wav2vec2-librispeech-demo
 
-This model is a fine-tuned version of [facebook/wav2vec2-large-lv60](https://huggingface.co/facebook/wav2vec2-large-lv60) on the librispeech_asr dataset.
+This model is a fine-tuned version of [facebook/wav2vec2-large-lv60](https://huggingface.co/facebook/wav2vec2-large-lv60) on the LIBRISPEECH_ASR - CLEAN dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.0031
-- Wer: 1.0237
+- Loss: 0.0030
+- Wer: 1.0225
 
 ## Model description
 
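To try the checkpoint described by the updated model card, a minimal inference sketch is shown below. It assumes the model was pushed to the Hub as `suhas-hegde5/wav2vec2-librispeech-demo` (a repo id inferred from the committer and model name, not stated in the diff) and that a local 16 kHz recording `sample.flac` exists.

```python
# Minimal sketch: transcribe one audio file with the fine-tuned checkpoint.
# The repo id and the audio file name are assumptions for illustration only.
from transformers import pipeline

asr = pipeline(
    "automatic-speech-recognition",
    model="suhas-hegde5/wav2vec2-librispeech-demo",
)
print(asr("sample.flac")["text"])
```
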
all_results.json ADDED
@@ -0,0 +1,15 @@
+{
+    "epoch": 15.0,
+    "eval_loss": 0.00301616033539176,
+    "eval_runtime": 315.0397,
+    "eval_samples": 2528,
+    "eval_samples_per_second": 8.024,
+    "eval_steps_per_second": 1.003,
+    "eval_wer": 1.0225474683544304,
+    "total_flos": 7.984243073297488e+18,
+    "train_loss": 0.8373936769831533,
+    "train_runtime": 15099.3203,
+    "train_samples": 2528,
+    "train_samples_per_second": 2.511,
+    "train_steps_per_second": 0.157
+}
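all_results.json, eval_results.json, train_results.json and trainer_state.json are the standard end-of-run artifacts written by the Trainer metric helpers. A hedged sketch of the usual pattern follows; `trainer` is assumed to be an already configured `transformers.Trainer`, and the exact training script is not part of this commit.

```python
# Sketch of how the metric files added in this commit are typically written.
from transformers import Trainer

def save_run_artifacts(trainer: Trainer) -> None:
    train_result = trainer.train()
    trainer.save_model()

    metrics = train_result.metrics
    trainer.log_metrics("train", metrics)
    trainer.save_metrics("train", metrics)      # train_results.json (+ all_results.json)
    trainer.save_state()                        # trainer_state.json

    eval_metrics = trainer.evaluate()
    trainer.log_metrics("eval", eval_metrics)
    trainer.save_metrics("eval", eval_metrics)  # eval_results.json, merged into all_results.json
```
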
eval_results.json ADDED
@@ -0,0 +1,9 @@
+{
+    "epoch": 15.0,
+    "eval_loss": 0.00301616033539176,
+    "eval_runtime": 315.0397,
+    "eval_samples": 2528,
+    "eval_samples_per_second": 8.024,
+    "eval_steps_per_second": 1.003,
+    "eval_wer": 1.0225474683544304
+}
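The reported metric is word error rate, WER = (substitutions + deletions + insertions) / reference words, so values above 1.0 simply mean the hypotheses contain more errors (typically insertions) than the references have words; the 1.0225 above is such a case. A small sketch of the computation with the `evaluate` library, using made-up strings:

```python
# WER with the `evaluate` library; the transcripts below are illustrative only.
import evaluate

wer_metric = evaluate.load("wer")
references = ["hello world"]
predictions = ["hello hello world world world"]  # 3 insertions against 2 reference words
print(wer_metric.compute(predictions=predictions, references=references))  # 1.5
```
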
runs/Feb14_22-20-51_server1/events.out.tfevents.1739567371.server1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6ac868ed3bcde6abfeb28a7bb2749551fe6893593282cb0357d64242d28add20
+size 40
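Only the Git LFS pointer (spec version, sha256 oid, size) is stored for the TensorBoard event file; at 40 bytes, the object it points to likely holds little beyond the file-version header. If the real event file is fetched, it can be inspected in Python as sketched below (the log directory matches the commit; the scalar tag names are assumptions).

```python
# Sketch: read scalars from a fetched TensorBoard event file with `tensorboard`.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator("runs/Feb14_22-20-51_server1")
acc.Reload()                  # parse every event file found in the directory
print(acc.Tags()["scalars"])  # e.g. "eval/wer", "train/loss" (assumed tag names)
```
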
train_results.json ADDED
@@ -0,0 +1,9 @@
+{
+    "epoch": 15.0,
+    "total_flos": 7.984243073297488e+18,
+    "train_loss": 0.8373936769831533,
+    "train_runtime": 15099.3203,
+    "train_samples": 2528,
+    "train_samples_per_second": 2.511,
+    "train_steps_per_second": 0.157
+}
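The throughput numbers are internally consistent: 2528 training samples over 15 epochs in 15099.32 s gives about 2.51 samples/s, and 2370 optimizer steps (the global_step recorded in trainer_state.json below) over the same runtime gives about 0.157 steps/s. A quick check:

```python
# Recompute the reported training throughput from the raw values in this commit.
train_samples = 2528
num_epochs = 15
train_runtime_s = 15099.3203
total_steps = 2370  # global_step from trainer_state.json

print(round(train_samples * num_epochs / train_runtime_s, 3))  # 2.511 samples/s
print(round(total_steps / train_runtime_s, 3))                 # 0.157 steps/s
```
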
trainer_state.json ADDED
@@ -0,0 +1,277 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 15.0,
+  "eval_steps": 100,
+  "global_step": 2370,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.6329113924050633,
+      "eval_loss": 3.940936803817749,
+      "eval_runtime": 314.2197,
+      "eval_samples_per_second": 8.045,
+      "eval_steps_per_second": 1.006,
+      "eval_wer": 1.0,
+      "step": 100
+    },
+    {
+      "epoch": 1.2658227848101267,
+      "eval_loss": 3.044066905975342,
+      "eval_runtime": 313.2217,
+      "eval_samples_per_second": 8.071,
+      "eval_steps_per_second": 1.009,
+      "eval_wer": 1.0,
+      "step": 200
+    },
+    {
+      "epoch": 1.8987341772151898,
+      "eval_loss": 2.9164648056030273,
+      "eval_runtime": 317.0806,
+      "eval_samples_per_second": 7.973,
+      "eval_steps_per_second": 0.997,
+      "eval_wer": 1.0,
+      "step": 300
+    },
+    {
+      "epoch": 2.5316455696202533,
+      "eval_loss": 1.4924770593643188,
+      "eval_runtime": 316.4362,
+      "eval_samples_per_second": 7.989,
+      "eval_steps_per_second": 0.999,
+      "eval_wer": 1.9968354430379747,
+      "step": 400
+    },
+    {
+      "epoch": 3.1645569620253164,
+      "grad_norm": 0.8539223670959473,
+      "learning_rate": 0.0002982,
+      "loss": 3.7012,
+      "step": 500
+    },
+    {
+      "epoch": 3.1645569620253164,
+      "eval_loss": 0.30101653933525085,
+      "eval_runtime": 312.8326,
+      "eval_samples_per_second": 8.081,
+      "eval_steps_per_second": 1.01,
+      "eval_wer": 1.9446202531645569,
+      "step": 500
+    },
+    {
+      "epoch": 3.7974683544303796,
+      "eval_loss": 0.17126257717609406,
+      "eval_runtime": 314.312,
+      "eval_samples_per_second": 8.043,
+      "eval_steps_per_second": 1.005,
+      "eval_wer": 1.8259493670886076,
+      "step": 600
+    },
+    {
+      "epoch": 4.430379746835443,
+      "eval_loss": 0.09897234290838242,
+      "eval_runtime": 312.6958,
+      "eval_samples_per_second": 8.085,
+      "eval_steps_per_second": 1.011,
+      "eval_wer": 1.6162974683544304,
+      "step": 700
+    },
+    {
+      "epoch": 5.063291139240507,
+      "eval_loss": 0.06915320456027985,
+      "eval_runtime": 317.6492,
+      "eval_samples_per_second": 7.958,
+      "eval_steps_per_second": 0.995,
+      "eval_wer": 1.5439082278481013,
+      "step": 800
+    },
+    {
+      "epoch": 5.69620253164557,
+      "eval_loss": 0.046260952949523926,
+      "eval_runtime": 321.8259,
+      "eval_samples_per_second": 7.855,
+      "eval_steps_per_second": 0.982,
+      "eval_wer": 1.423259493670886,
+      "step": 900
+    },
+    {
+      "epoch": 6.329113924050633,
+      "grad_norm": 0.26903435587882996,
+      "learning_rate": 0.00022026737967914436,
+      "loss": 0.1686,
+      "step": 1000
+    },
+    {
+      "epoch": 6.329113924050633,
+      "eval_loss": 0.038907092064619064,
+      "eval_runtime": 320.4174,
+      "eval_samples_per_second": 7.89,
+      "eval_steps_per_second": 0.986,
+      "eval_wer": 1.3469145569620253,
+      "step": 1000
+    },
+    {
+      "epoch": 6.962025316455696,
+      "eval_loss": 0.029044821858406067,
+      "eval_runtime": 320.649,
+      "eval_samples_per_second": 7.884,
+      "eval_steps_per_second": 0.986,
+      "eval_wer": 1.3101265822784811,
+      "step": 1100
+    },
+    {
+      "epoch": 7.594936708860759,
+      "eval_loss": 0.020351797342300415,
+      "eval_runtime": 316.4006,
+      "eval_samples_per_second": 7.99,
+      "eval_steps_per_second": 0.999,
+      "eval_wer": 1.1993670886075949,
+      "step": 1200
+    },
+    {
+      "epoch": 8.227848101265822,
+      "eval_loss": 0.016085166484117508,
+      "eval_runtime": 321.3591,
+      "eval_samples_per_second": 7.867,
+      "eval_steps_per_second": 0.983,
+      "eval_wer": 1.1839398734177216,
+      "step": 1300
+    },
+    {
+      "epoch": 8.860759493670885,
+      "eval_loss": 0.014270616695284843,
+      "eval_runtime": 314.8714,
+      "eval_samples_per_second": 8.029,
+      "eval_steps_per_second": 1.004,
+      "eval_wer": 1.1499208860759493,
+      "step": 1400
+    },
+    {
+      "epoch": 9.49367088607595,
+      "grad_norm": 1.2659544944763184,
+      "learning_rate": 0.00014005347593582887,
+      "loss": 0.0553,
+      "step": 1500
+    },
+    {
+      "epoch": 9.49367088607595,
+      "eval_loss": 0.011028471402823925,
+      "eval_runtime": 316.4518,
+      "eval_samples_per_second": 7.989,
+      "eval_steps_per_second": 0.999,
+      "eval_wer": 1.1459651898734178,
+      "step": 1500
+    },
+    {
+      "epoch": 10.126582278481013,
+      "eval_loss": 0.008157163858413696,
+      "eval_runtime": 315.7243,
+      "eval_samples_per_second": 8.007,
+      "eval_steps_per_second": 1.001,
+      "eval_wer": 1.0953322784810127,
+      "step": 1600
+    },
+    {
+      "epoch": 10.759493670886076,
+      "eval_loss": 0.008831300772726536,
+      "eval_runtime": 315.0431,
+      "eval_samples_per_second": 8.024,
+      "eval_steps_per_second": 1.003,
+      "eval_wer": 1.1119462025316456,
+      "step": 1700
+    },
+    {
+      "epoch": 11.39240506329114,
+      "eval_loss": 0.005905392114073038,
+      "eval_runtime": 319.3821,
+      "eval_samples_per_second": 7.915,
+      "eval_steps_per_second": 0.989,
+      "eval_wer": 1.0573575949367089,
+      "step": 1800
+    },
+    {
+      "epoch": 12.025316455696203,
+      "eval_loss": 0.005364276003092527,
+      "eval_runtime": 316.4189,
+      "eval_samples_per_second": 7.989,
+      "eval_steps_per_second": 0.999,
+      "eval_wer": 1.0510284810126582,
+      "step": 1900
+    },
+    {
+      "epoch": 12.658227848101266,
+      "grad_norm": 0.34013208746910095,
+      "learning_rate": 5.983957219251336e-05,
+      "loss": 0.0295,
+      "step": 2000
+    },
+    {
+      "epoch": 12.658227848101266,
+      "eval_loss": 0.004200654104351997,
+      "eval_runtime": 316.6068,
+      "eval_samples_per_second": 7.985,
+      "eval_steps_per_second": 0.998,
+      "eval_wer": 1.0356012658227849,
+      "step": 2000
+    },
+    {
+      "epoch": 13.291139240506329,
+      "eval_loss": 0.003917561378329992,
+      "eval_runtime": 317.6825,
+      "eval_samples_per_second": 7.958,
+      "eval_steps_per_second": 0.995,
+      "eval_wer": 1.035996835443038,
+      "step": 2100
+    },
+    {
+      "epoch": 13.924050632911392,
+      "eval_loss": 0.0033297832123935223,
+      "eval_runtime": 317.8207,
+      "eval_samples_per_second": 7.954,
+      "eval_steps_per_second": 0.994,
+      "eval_wer": 1.0268987341772151,
+      "step": 2200
+    },
+    {
+      "epoch": 14.556962025316455,
+      "eval_loss": 0.0030845776200294495,
+      "eval_runtime": 316.4074,
+      "eval_samples_per_second": 7.99,
+      "eval_steps_per_second": 0.999,
+      "eval_wer": 1.0237341772151898,
+      "step": 2300
+    },
+    {
+      "epoch": 15.0,
+      "step": 2370,
+      "total_flos": 7.984243073297488e+18,
+      "train_loss": 0.8373936769831533,
+      "train_runtime": 15099.3203,
+      "train_samples_per_second": 2.511,
+      "train_steps_per_second": 0.157
+    }
+  ],
+  "logging_steps": 500,
+  "max_steps": 2370,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 15,
+  "save_steps": 400,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": true
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 7.984243073297488e+18,
+  "train_batch_size": 8,
+  "trial_name": null,
+  "trial_params": null
+}
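trainer_state.json records several of the run's hyperparameters directly: logging every 500 steps, evaluation every 100 steps, checkpoints every 400 steps, 15 epochs over 2370 optimizer steps, and a per-device train batch size of 8. The logged learning rates (2.982e-4 at step 500 falling to 5.98e-5 at step 2000) drop by roughly 8.0e-5 per 500 steps, which is consistent with a linear schedule decaying to zero at max_steps from a peak near 3e-4 after a short warmup. A hedged reconstruction of the corresponding TrainingArguments follows; only the values marked as coming from trainer_state.json are recorded in this commit, everything else is an assumption.

```python
# Hedged reconstruction of the run's TrainingArguments; unmarked values are assumptions.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="wav2vec2-librispeech-demo",  # assumption (matches the repo name)
    num_train_epochs=15,                     # trainer_state.json: num_train_epochs
    per_device_train_batch_size=8,           # trainer_state.json: train_batch_size
    gradient_accumulation_steps=2,           # assumption: 2528 samples / 158 steps per epoch = 16 per step (could also be 2 GPUs)
    per_device_eval_batch_size=8,            # assumption, consistent with ~1 eval step/s at ~8 samples/s
    eval_strategy="steps",                   # "evaluation_strategy" in older transformers releases
    eval_steps=100,                          # trainer_state.json: eval_steps
    logging_steps=500,                       # trainer_state.json: logging_steps
    save_steps=400,                          # trainer_state.json: save_steps
    learning_rate=3e-4,                      # assumption, consistent with the logged schedule
    warmup_steps=100,                        # assumption
    fp16=True,                               # assumption
    push_to_hub=True,                        # assumption, given the "End of training" Hub commit
)
```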