Commit a184936 (verified) by rtakeda · 1 parent: 861552a

Upload 7 files
README.md CHANGED
@@ -1,3 +1,54 @@
- ---
- license: cc-by-nc-4.0
- ---
+ ---
+ license: cc-by-nc-4.0
+ ---
+ ## ESPnet2 ASR model
+
+ ### `ouktlab/espnet_asr-ja-kc-stream_am-transformer-robustcorpus10_lm-transformer-corpus10-bccwj-wiki40b`
+
+ This model was trained using the CSJ recipe in [ESPnet](https://github.com/espnet/espnet/).
+
+ ### Citing ESPnet
+
+ ```bibtex
+ @inproceedings{watanabe2018espnet,
+   author={Shinji Watanabe and Takaaki Hori and Shigeki Karita and Tomoki Hayashi and Jiro Nishitoba and Yuya Unno and Nelson Yalta and Jahn Heymann and Matthew Wiesner and Nanxin Chen and Adithya Renduchintala and Tsubasa Ochiai},
+   title={{ESPnet}: End-to-End Speech Processing Toolkit},
+   year={2018},
+   booktitle={Proceedings of Interspeech},
+   pages={2207--2211},
+   doi={10.21437/Interspeech.2018-1456},
+   url={http://dx.doi.org/10.21437/Interspeech.2018-1456}
+ }
+ ```
+
+ or arXiv:
+
+ ```bibtex
+ @misc{watanabe2018espnet,
+   title={ESPnet: End-to-End Speech Processing Toolkit},
+   author={Shinji Watanabe and Takaaki Hori and Shigeki Karita and Tomoki Hayashi and Jiro Nishitoba and Yuya Unno and Nelson Yalta and Jahn Heymann and Matthew Wiesner and Nanxin Chen and Adithya Renduchintala and Tsubasa Ochiai},
+   year={2018},
+   eprint={1804.00015},
+   archivePrefix={arXiv},
+   primaryClass={cs.CL}
+ }
+ ```
+
+ ### Katakana model
+
+ ```bibtex
+ @inproceedings{rtakeda2024:iwsds,
+   author={Ryu Takeda and Kazunori Komatani},
+   title={Toward OOV-word Acquisition during Spoken Dialogue using Syllable-based ASR and Word Segmentation},
+   year={2024},
+   booktitle={Proceedings of International Workshop on Spoken Dialogue Systems Technology (IWSDS)}
+ }
+
+ @inproceedings{oshio2023:apsipa,
+   author={Miki Oshio and Hokuto Munakata and Ryu Takeda and Kazunori Komatani},
+   title={Out-Of-Vocabulary Word Detection in Spoken Dialogues Based on Joint Decoding with User Response Patterns},
+   year={2023},
+   booktitle={Proceedings of Asia Pacific Signal and Information Processing Association (APSIPA)},
+   pages={1753--1759}
+ }
+ ```
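A minimal offline-decoding sketch for the model card above, assuming `espnet`, `espnet_model_zoo`, and `soundfile` are installed and that `espnet_model_zoo` can resolve this Hugging Face tag; the beam size and weights are illustrative, and the 16 kHz mono input matches `frontend_conf.fs` in the packed `config.yaml`.

```python
# Sketch: offline decoding via espnet2's Speech2Text (assumptions noted above).
import soundfile as sf
from espnet2.bin.asr_inference import Speech2Text

speech2text = Speech2Text.from_pretrained(
    "ouktlab/espnet_asr-ja-kc-stream_am-transformer-robustcorpus10_lm-transformer-corpus10-bccwj-wiki40b",
    beam_size=10,     # illustrative
    ctc_weight=0.3,   # mirrors model_conf.ctc_weight used for training
    lm_weight=0.3,    # illustrative shallow-fusion weight, tune as needed
)

# "sample.wav" is a placeholder for any 16 kHz mono recording.
speech, rate = sf.read("sample.wav")
nbest = speech2text(speech)
text, tokens, token_ids, hyp = nbest[0]
print(text)
```

If the tag cannot be resolved by `espnet_model_zoo`, the same class accepts the local `config.yaml`/`.pth` paths listed in `meta.yaml` directly (see the sketches further down).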
exp/asr_stats_raw_jp_char_sp/train/feats_stats.npz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d77903ab1c6159cb627d842db7a7c2503a5d4138e5f1ddac81353d59354157b1
+ size 1402
exp/asr_train_asr_transformer_ja_raw_jp_char_sp/config.yaml ADDED
@@ -0,0 +1,373 @@
+ config: train_asr_streaming_transformer.yaml
+ print_config: false
+ log_level: INFO
+ drop_last_iter: false
+ dry_run: false
+ iterator_type: sequence
+ valid_iterator_type: null
+ output_dir: exp/
+ ngpu: 1
+ seed: 0
+ num_workers: 1
+ num_att_plot: 3
+ dist_backend: nccl
+ dist_init_method: env://
+ dist_world_size: null
+ dist_rank: null
+ local_rank: null
+ dist_master_addr: null
+ dist_master_port: null
+ dist_launcher: null
+ multiprocessing_distributed: false
+ unused_parameters: false
+ sharded_ddp: false
+ cudnn_enabled: true
+ cudnn_benchmark: false
+ cudnn_deterministic: true
+ collect_stats: false
+ write_collected_feats: false
+ max_epoch: 20
+ patience: null
+ val_scheduler_criterion:
+ - valid
+ - loss
+ early_stopping_criterion:
+ - valid
+ - loss
+ - min
+ best_model_criterion:
+ - - valid
+   - cer
+   - min
+ keep_nbest_models: 10
+ nbest_averaging_interval: 0
+ grad_clip: 5.0
+ grad_clip_type: 2.0
+ grad_noise: false
+ accum_grad: 6
+ no_forward_run: false
+ resume: true
+ train_dtype: float32
+ use_amp: false
+ log_interval: null
+ use_matplotlib: true
+ use_tensorboard: true
+ create_graph_in_tensorboard: false
+ use_wandb: false
+ wandb_project: null
+ wandb_id: null
+ wandb_entity: null
+ wandb_name: null
+ wandb_model_log_interval: -1
+ detect_anomaly: false
+ pretrain_path: null
+ init_param: []
+ ignore_init_mismatch: false
+ freeze_param: []
+ num_iters_per_epoch: null
+ batch_size: 20
+ valid_batch_size: null
+ batch_bins: 10000000
+ valid_batch_bins: null
+ train_shape_file:
+ -
+ -
+ valid_shape_file:
+ -
+ -
+ batch_type: numel
+ valid_batch_type: null
+ fold_length:
+ - 80000
+ - 150
+ sort_in_batch: descending
+ shuffle_within_batch: false
+ sort_batch: descending
+ multiple_iterator: false
+ chunk_length: 500
+ chunk_shift_ratio: 0.5
+ num_cache_chunks: 1024
+ chunk_excluded_key_prefixes: []
+ train_data_path_and_name_and_type:
+ - -
+   - speech
+   - sound
+ - -
+   - text
+   - text
+ valid_data_path_and_name_and_type:
+ - -
+   - speech
+   - sound
+ - -
+   - text
+   - text
+ allow_variable_data_keys: false
+ max_cache_size: 0.0
+ max_cache_fd: 32
+ valid_max_cache_size: null
+ exclude_weight_decay: false
+ exclude_weight_decay_conf: {}
+ optim: adam
+ optim_conf:
+     lr: 2.0e-05
+ scheduler: null
+ scheduler_conf: {}
+ token_list:
+ - <blank>
+ - <unk>
+ - <b>
+ - <e>
+ - <o>
+ - <i>
+ - </>
+ - '!'
+ - '#'
+ - $
+ - '%'
+ - '&'
+ - (
+ - )
+ - '*'
+ - '='
+ - +
+ - '-'
+ - /
+ - ':'
+ - <
+ - '>'
+ - '?'
+ - '@'
+ - A
+ - B
+ - C
+ - D
+ - E
+ - F
+ - G
+ - H
+ - I
+ - J
+ - K
+ - L
+ - M
+ - N
+ - O
+ - P
+ - Q
+ - R
+ - S
+ - T
+ - U
+ - V
+ - W
+ - X
+ - Y
+ - Z
+ - '['
+ - ']'
+ - ^
+ - _
+ - a
+ - b
+ - c
+ - d
+ - e
+ - f
+ - g
+ - h
+ - i
+ - j
+ - k
+ - l
+ - m
+ - n
+ - o
+ - p
+ - q
+ - r
+ - s
+ - t
+ - u
+ - v
+ - w
+ - x
+ - y
+ - z
+ - '{'
+ - '|'
+ - '}'
+ - 、
+ - 。
+ - ?
+ - !
+ - …
+ - ー
+ - ァ
+ - ア
+ - ィ
+ - イ
+ - ゥ
+ - ウ
+ - ェ
+ - エ
+ - ォ
+ - オ
+ - カ
+ - ガ
+ - キ
+ - ギ
+ - ク
+ - グ
+ - ケ
+ - ゲ
+ - コ
+ - ゴ
+ - サ
+ - ザ
+ - シ
+ - ジ
+ - ス
+ - ズ
+ - セ
+ - ゼ
+ - ソ
+ - ゾ
+ - タ
+ - ダ
+ - チ
+ - ヂ
+ - ッ
+ - ツ
+ - ヅ
+ - テ
+ - デ
+ - ト
+ - ド
+ - ナ
+ - ニ
+ - ヌ
+ - ネ
+ - ノ
+ - ハ
+ - バ
+ - パ
+ - ヒ
+ - ビ
+ - ピ
+ - フ
+ - ブ
+ - プ
+ - ヘ
+ - ベ
+ - ペ
+ - ホ
+ - ボ
+ - ポ
+ - マ
+ - ミ
+ - ム
+ - メ
+ - モ
+ - ャ
+ - ヤ
+ - ュ
+ - ユ
+ - ョ
+ - ヨ
+ - ラ
+ - リ
+ - ル
+ - レ
+ - ロ
+ - ヮ
+ - ワ
+ - ヲ
+ - ン
+ - ヴ
+ - <sos/eos>
+ init: xavier_uniform
+ input_size: null
+ ctc_conf:
+     dropout_rate: 0.0
+     ctc_type: builtin
+     reduce: true
+     ignore_nan_grad: null
+     zero_infinity: true
+ joint_net_conf: null
+ use_preprocessor: true
+ token_type: char
+ bpemodel: null
+ non_linguistic_symbols: null
+ cleaner: null
+ g2p: null
+ speech_volume_normalize: null
+ rir_scp: null
+ rir_apply_prob: 1.0
+ noise_scp: null
+ noise_apply_prob: 1.0
+ noise_db_range: '13_15'
+ short_noise_thres: 0.5
+ aux_ctc_tasks: []
+ frontend: default
+ frontend_conf:
+     fs: 16k
+ specaug: specaug
+ specaug_conf:
+     apply_time_warp: true
+     time_warp_window: 5
+     time_warp_mode: bicubic
+     apply_freq_mask: true
+     freq_mask_width_range:
+     - 0
+     - 30
+     num_freq_mask: 2
+     apply_time_mask: true
+     time_mask_width_range:
+     - 0
+     - 40
+     num_time_mask: 2
+ normalize: global_mvn
+ normalize_conf:
+     stats_file: exp/asr_stats_raw_jp_char_sp/train/feats_stats.npz
+ model: espnet
+ model_conf:
+     ctc_weight: 0.3
+     lsm_weight: 0.1
+     length_normalized_loss: false
+ preencoder: null
+ preencoder_conf: {}
+ encoder: contextual_block_transformer
+ encoder_conf:
+     output_size: 512
+     attention_heads: 8
+     linear_units: 2048
+     num_blocks: 18
+     dropout_rate: 0.1
+     positional_dropout_rate: 0.1
+     attention_dropout_rate: 0.1
+     input_layer: conv2d6
+     normalize_before: true
+     block_size: 20
+     hop_size: 8
+     look_ahead: 8
+     init_average: true
+     ctx_pos_enc: true
+ postencoder: null
+ postencoder_conf: {}
+ decoder: transformer
+ decoder_conf:
+     attention_heads: 8
+     linear_units: 2048
+     num_blocks: 6
+     dropout_rate: 0.1
+     positional_dropout_rate: 0.1
+     self_attention_dropout_rate: 0.1
+     src_attention_dropout_rate: 0.1
+ preprocessor: default
+ preprocessor_conf: {}
+ required:
+ - output_dir
+ - token_list
+ version: '202308'
+ distributed: false
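The encoder in this config is a contextual block transformer (`block_size: 20`, `hop_size: 8`, `look_ahead: 8`), i.e. a blockwise streaming encoder. A chunk-by-chunk decoding sketch with espnet2's streaming front end follows; the chunk size, the `sample.wav` file name, and the decoding weights are assumptions, and the keyword arguments mirror the offline `Speech2Text` API.

```python
# Sketch: streaming decoding for the contextual_block_transformer encoder.
# The model/config paths are the packed files from this repository; chunk
# size and decoding weights are illustrative assumptions.
import soundfile as sf
from espnet2.bin.asr_inference_streaming import Speech2TextStreaming

speech2text = Speech2TextStreaming(
    asr_train_config="exp/asr_train_asr_transformer_ja_raw_jp_char_sp/config.yaml",
    asr_model_file="exp/asr_train_asr_transformer_ja_raw_jp_char_sp/valid.cer.ave_10best.pth",
    lm_train_config="exp/lm_train_lm_ja_char/config.yaml",
    lm_file="exp/lm_train_lm_ja_char/valid.loss.ave_5best.pth",
    beam_size=10,
    ctc_weight=0.3,
    lm_weight=0.3,
)

speech, rate = sf.read("sample.wav")  # 16 kHz mono, per frontend_conf
chunk = 8000  # feed 0.5 s at a time; smaller chunks lower the latency
for start in range(0, len(speech), chunk):
    is_final = start + chunk >= len(speech)
    results = speech2text(speech=speech[start:start + chunk], is_final=is_final)
    if results:  # partial hypotheses may be empty early in the stream
        print(results[0][0])
```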
exp/asr_train_asr_transformer_ja_raw_jp_char_sp/valid.cer.ave_10best.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e0a0bfd4c881e42c0fea71e9f8b137f71c9d90619e61012f34fd7182a9812a19
+ size 368015808
exp/lm_train_lm_ja_char/config.yaml ADDED
@@ -0,0 +1,303 @@
+ config: train_lm_transformer.yaml
+ print_config: false
+ log_level: INFO
+ drop_last_iter: false
+ dry_run: false
+ iterator_type: sequence
+ valid_iterator_type: null
+ output_dir:
+ ngpu: 1
+ seed: 0
+ num_workers: 1
+ num_att_plot: 3
+ dist_backend: nccl
+ dist_init_method: env://
+ dist_world_size: null
+ dist_rank: null
+ local_rank: 0
+ dist_master_addr: null
+ dist_master_port: null
+ dist_launcher: null
+ multiprocessing_distributed: false
+ unused_parameters: false
+ sharded_ddp: false
+ cudnn_enabled: true
+ cudnn_benchmark: false
+ cudnn_deterministic: true
+ collect_stats: false
+ write_collected_feats: false
+ max_epoch: 20
+ patience: null
+ val_scheduler_criterion:
+ - valid
+ - loss
+ early_stopping_criterion:
+ - valid
+ - loss
+ - min
+ best_model_criterion:
+ - - valid
+   - loss
+   - min
+ keep_nbest_models: 10
+ nbest_averaging_interval: 0
+ grad_clip: 5.0
+ grad_clip_type: 2.0
+ grad_noise: false
+ accum_grad: 2
+ no_forward_run: false
+ resume: false
+ train_dtype: float32
+ use_amp: false
+ log_interval: null
+ use_matplotlib: true
+ use_tensorboard: true
+ create_graph_in_tensorboard: false
+ use_wandb: false
+ wandb_project: null
+ wandb_id: null
+ wandb_entity: null
+ wandb_name: null
+ wandb_model_log_interval: -1
+ detect_anomaly: false
+ pretrain_path: null
+ init_param: []
+ ignore_init_mismatch: false
+ freeze_param: []
+ num_iters_per_epoch: null
+ batch_size: 20
+ valid_batch_size: null
+ batch_bins: 4000000
+ valid_batch_bins: null
+ train_shape_file:
+ -
+ valid_shape_file:
+ -
+ batch_type: numel
+ valid_batch_type: null
+ fold_length:
+ - 150
+ sort_in_batch: descending
+ shuffle_within_batch: false
+ sort_batch: descending
+ multiple_iterator: false
+ chunk_length: 500
+ chunk_shift_ratio: 0.5
+ num_cache_chunks: 1024
+ chunk_excluded_key_prefixes: []
+ train_data_path_and_name_and_type:
+ - -
+   - text
+   - text
+ valid_data_path_and_name_and_type:
+ - -
+   - text
+   - text
+ allow_variable_data_keys: false
+ max_cache_size: 0.0
+ max_cache_fd: 32
+ valid_max_cache_size: null
+ exclude_weight_decay: false
+ exclude_weight_decay_conf: {}
+ optim: adam
+ optim_conf:
+     lr: 0.001
+ scheduler: warmuplr
+ scheduler_conf:
+     warmup_steps: 25000
+ token_list:
+ - <blank>
+ - <unk>
+ - <b>
+ - <e>
+ - <o>
+ - <i>
+ - </>
+ - '!'
+ - '#'
+ - $
+ - '%'
+ - '&'
+ - (
+ - )
+ - '*'
+ - '='
+ - +
+ - '-'
+ - /
+ - ':'
+ - <
+ - '>'
+ - '?'
+ - '@'
+ - A
+ - B
+ - C
+ - D
+ - E
+ - F
+ - G
+ - H
+ - I
+ - J
+ - K
+ - L
+ - M
+ - N
+ - O
+ - P
+ - Q
+ - R
+ - S
+ - T
+ - U
+ - V
+ - W
+ - X
+ - Y
+ - Z
+ - '['
+ - ']'
+ - ^
+ - _
+ - a
+ - b
+ - c
+ - d
+ - e
+ - f
+ - g
+ - h
+ - i
+ - j
+ - k
+ - l
+ - m
+ - n
+ - o
+ - p
+ - q
+ - r
+ - s
+ - t
+ - u
+ - v
+ - w
+ - x
+ - y
+ - z
+ - '{'
+ - '|'
+ - '}'
+ - 、
+ - 。
+ - ?
+ - !
+ - …
+ - ー
+ - ァ
+ - ア
+ - ィ
+ - イ
+ - ゥ
+ - ウ
+ - ェ
+ - エ
+ - ォ
+ - オ
+ - カ
+ - ガ
+ - キ
+ - ギ
+ - ク
+ - グ
+ - ケ
+ - ゲ
+ - コ
+ - ゴ
+ - サ
+ - ザ
+ - シ
+ - ジ
+ - ス
+ - ズ
+ - セ
+ - ゼ
+ - ソ
+ - ゾ
+ - タ
+ - ダ
+ - チ
+ - ヂ
+ - ッ
+ - ツ
+ - ヅ
+ - テ
+ - デ
+ - ト
+ - ド
+ - ナ
+ - ニ
+ - ヌ
+ - ネ
+ - ノ
+ - ハ
+ - バ
+ - パ
+ - ヒ
+ - ビ
+ - ピ
+ - フ
+ - ブ
+ - プ
+ - ヘ
+ - ベ
+ - ペ
+ - ホ
+ - ボ
+ - ポ
+ - マ
+ - ミ
+ - ム
+ - メ
+ - モ
+ - ャ
+ - ヤ
+ - ュ
+ - ユ
+ - ョ
+ - ヨ
+ - ラ
+ - リ
+ - ル
+ - レ
+ - ロ
+ - ヮ
+ - ワ
+ - ヲ
+ - ン
+ - ヴ
+ - <sos/eos>
+ init: null
+ model_conf:
+     ignore_id: 0
+ use_preprocessor: true
+ token_type: char
+ bpemodel: null
+ non_linguistic_symbols: null
+ cleaner: null
+ g2p: null
+ lm: transformer
+ lm_conf:
+     pos_enc: null
+     embed_unit: 256
+     att_unit: 512
+     head: 8
+     unit: 2048
+     layer: 16
+     dropout_rate: 0.1
+ required:
+ - output_dir
+ - token_list
+ version: '202308'
+ distributed: false
exp/lm_train_lm_ja_char/valid.loss.ave_5best.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4d9c4920e781f8323bca07939d006d8fd41fda69d482e6bc165d352d3ca1fde0
+ size 202919113
meta.yaml ADDED
@@ -0,0 +1,6 @@
+ files:
+   asr_model_file: exp/asr_train_asr_transformer_ja_raw_jp_char_sp/valid.cer.ave_10best.pth
+   lm_file: exp/lm_train_lm_ja_char/valid.loss.ave_5best.pth
+ yaml_files:
+   asr_train_config: exp/asr_train_asr_transformer_ja_raw_jp_char_sp/config.yaml
+   lm_train_config: exp/lm_train_lm_ja_char/config.yaml
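`meta.yaml` maps the packed files onto the keyword arguments of the espnet2 inference classes. A minimal sketch of that correspondence, assuming PyYAML is installed and a CPU device:

```python
# Sketch: build an offline recognizer directly from the entries in meta.yaml.
# The key names are exactly those listed above; the device choice is an
# assumption.
import yaml
from espnet2.bin.asr_inference import Speech2Text

with open("meta.yaml") as f:
    meta = yaml.safe_load(f)

speech2text = Speech2Text(
    asr_train_config=meta["yaml_files"]["asr_train_config"],
    asr_model_file=meta["files"]["asr_model_file"],
    lm_train_config=meta["yaml_files"]["lm_train_config"],
    lm_file=meta["files"]["lm_file"],
    device="cpu",
)
```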