mrmuminov committed on
Commit
35b06fb
·
verified ·
1 Parent(s): 0cfee69

Training in progress, step 2000

Browse files
config.json CHANGED
@@ -7,10 +7,7 @@
7
  "WhisperForConditionalGeneration"
8
  ],
9
  "attention_dropout": 0.0,
10
- "begin_suppress_tokens": [
11
- 220,
12
- 50257
13
- ],
14
  "bos_token_id": 50257,
15
  "classifier_proj_size": 256,
16
  "d_model": 768,
@@ -25,7 +22,20 @@
25
  "encoder_layerdrop": 0.0,
26
  "encoder_layers": 12,
27
  "eos_token_id": 50257,
28
- "forced_decoder_ids": null,
 
 
 
 
 
 
 
 
 
 
 
 
 
29
  "init_std": 0.02,
30
  "is_encoder_decoder": true,
31
  "mask_feature_length": 10,
@@ -34,7 +44,7 @@
34
  "mask_time_length": 10,
35
  "mask_time_min_masks": 2,
36
  "mask_time_prob": 0.05,
37
- "max_length": 448,
38
  "max_source_positions": 1500,
39
  "max_target_positions": 448,
40
  "median_filter_width": 7,
@@ -43,9 +53,8 @@
43
  "num_mel_bins": 80,
44
  "pad_token_id": 50257,
45
  "scale_embedding": false,
46
- "suppress_tokens": [],
47
  "torch_dtype": "float32",
48
- "transformers_version": "4.37.2",
49
  "use_cache": true,
50
  "use_weighted_layer_sum": false,
51
  "vocab_size": 51865
 
7
  "WhisperForConditionalGeneration"
8
  ],
9
  "attention_dropout": 0.0,
10
+ "begin_suppress_tokens": null,
 
 
 
11
  "bos_token_id": 50257,
12
  "classifier_proj_size": 256,
13
  "d_model": 768,
 
22
  "encoder_layerdrop": 0.0,
23
  "encoder_layers": 12,
24
  "eos_token_id": 50257,
25
+ "forced_decoder_ids": [
26
+ [
27
+ 1,
28
+ 50259
29
+ ],
30
+ [
31
+ 2,
32
+ 50359
33
+ ],
34
+ [
35
+ 3,
36
+ 50363
37
+ ]
38
+ ],
39
  "init_std": 0.02,
40
  "is_encoder_decoder": true,
41
  "mask_feature_length": 10,
 
44
  "mask_time_length": 10,
45
  "mask_time_min_masks": 2,
46
  "mask_time_prob": 0.05,
47
+ "max_length": null,
48
  "max_source_positions": 1500,
49
  "max_target_positions": 448,
50
  "median_filter_width": 7,
 
53
  "num_mel_bins": 80,
54
  "pad_token_id": 50257,
55
  "scale_embedding": false,
 
56
  "torch_dtype": "float32",
57
+ "transformers_version": "4.49.0",
58
  "use_cache": true,
59
  "use_weighted_layer_sum": false,
60
  "vocab_size": 51865
model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:00dc2310fbc2c10b504901d9dd665cbc6dde15668da4053224efdc92c38f0348
3
  size 966995080
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:be358e9efaa354debfeaefa2fc491089e7c731bf825fe9ac1aa3611571152144
3
  size 966995080
runs/Jul08_08-09-46_ai/events.out.tfevents.1751962189.ai.2985200.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9cca8ea8c4df8915336db2968fa6f4625b88e6147998bcf0ae2dccc465d4ed03
3
+ size 24012
tokenizer_config.json CHANGED
@@ -12980,6 +12980,7 @@
12980
  "clean_up_tokenization_spaces": true,
12981
  "eos_token": "<|endoftext|>",
12982
  "errors": "replace",
 
12983
  "model_max_length": 1024,
12984
  "pad_token": "<|endoftext|>",
12985
  "processor_class": "WhisperProcessor",
 
12980
  "clean_up_tokenization_spaces": true,
12981
  "eos_token": "<|endoftext|>",
12982
  "errors": "replace",
12983
+ "extra_special_tokens": {},
12984
  "model_max_length": 1024,
12985
  "pad_token": "<|endoftext|>",
12986
  "processor_class": "WhisperProcessor",
training_args.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:ec094e96acc47dca873646a94ffa88f4008e86541a30b5f9ee3b506645f8facf
3
- size 4856
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e431dde707b762492d683315e47d864ecd11713b65bca70ecef583f3af4a429f
3
+ size 5496