Commit e25ea46 · lijingbei · 1 Parent(s): 7bb2a7d

added model files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
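The added rule routes tokenizer.json through Git LFS alongside the existing archive patterns. As a rough illustration (not part of the commit, and fnmatch only approximates gitattributes glob semantics), a small script can preview which paths these rules would catch:

from fnmatch import fnmatch

# Patterns from the hunk above (illustrative subset of .gitattributes).
LFS_PATTERNS = ["*.zip", "*.zst", "*tfevents*", "tokenizer.json"]

def is_lfs_tracked(path: str) -> bool:
    """True if `path` matches any of the LFS filter patterns."""
    return any(fnmatch(path, pattern) for pattern in LFS_PATTERNS)

for path in ["tokenizer.json", "config.json", "runs/events.out.tfevents.0"]:
    print(path, "->", is_lfs_tracked(path))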
added_tokens.json ADDED
The diff for this file is too large to render. See raw diff
 
config.json ADDED
@@ -0,0 +1,36 @@
+ {
+   "architectures": [
+     "StepAudio2ForCausalLM"
+   ],
+   "auto_map": {
+     "AutoConfig": "configuration_step_audio_2.StepAudio2Config",
+     "AutoModelForCausalLM": "modeling_step_audio_2.StepAudio2ForCausalLM"
+   },
+   "model_type": "step_audio_2",
+   "hidden_size": 3584,
+   "intermediate_size": 18944,
+   "num_attention_heads": 28,
+   "num_attention_groups": 4,
+   "num_key_value_heads": 4,
+   "num_hidden_layers": 28,
+   "max_seq_len": 16384,
+   "vocab_size": 158720,
+   "rms_norm_eps": 1e-06,
+   "eos_token_id": 151643,
+   "pad_token_id": 151643,
+   "rope_theta": 1000000.0,
+   "max_position_embeddings": 16384,
+   "rope_scaling": null,
+   "torch_dtype": "bfloat16",
+   "audio_encoder_config": {
+     "n_mels": 128,
+     "n_audio_ctx": 1500,
+     "n_audio_state": 1280,
+     "n_audio_head": 20,
+     "n_audio_layer": 32,
+     "n_codebook_size": 4096,
+     "llm_dim": 3584,
+     "kernel_size": 3,
+     "adapter_stride": 2
+   }
+ }
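Because auto_map points AutoConfig and AutoModelForCausalLM at the bundled configuration_step_audio_2.py and modeling_step_audio_2.py, loading this checkpoint requires trust_remote_code. A minimal sketch, assuming the checkpoint sits in a local directory (the path is hypothetical):

import torch
from transformers import AutoConfig, AutoModelForCausalLM

MODEL_PATH = "./step-audio-2"  # hypothetical local checkout of this repo

config = AutoConfig.from_pretrained(MODEL_PATH, trust_remote_code=True)
print(config.model_type)            # "step_audio_2"
print(config.audio_encoder_config)  # nested StepAudio2EncoderConfig

model = AutoModelForCausalLM.from_pretrained(
    MODEL_PATH,
    trust_remote_code=True,
    torch_dtype=torch.bfloat16,  # matches "torch_dtype" in config.json
)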
configuration_step_audio_2.py ADDED
@@ -0,0 +1,56 @@
+ from typing import Optional
+
+ from transformers import Qwen2Config
+ from transformers.configuration_utils import PretrainedConfig
+
+
+ class StepAudio2EncoderConfig(PretrainedConfig):
+     model_type = "step_audio_2_encoder"
+
+     def __init__(
+         self,
+         n_mels=128,
+         n_audio_ctx=1500,
+         n_audio_state=512,
+         n_audio_head=8,
+         n_audio_layer=6,
+         llm_dim=4096,
+         kernel_size=3,
+         adapter_stride=2,
+         **kwargs,
+     ):
+         self.n_mels = n_mels
+         self.n_audio_ctx = n_audio_ctx
+         self.n_audio_state = n_audio_state
+         self.n_audio_head = n_audio_head
+         self.n_audio_layer = n_audio_layer
+         self.llm_dim = llm_dim
+         self.kernel_size = kernel_size
+         self.adapter_stride = adapter_stride
+         super().__init__(**kwargs)
+
+
+ class StepAudio2Config(PretrainedConfig):
+     model_type = "step_audio_2"
+     architectures = ["StepAudio2ForCausalLM"]
+
+     def __init__(
+         self,
+         audio_encoder_config=None,
+         use_sliding_window: bool = False,
+         sliding_window: Optional[int] = 2048,
+         max_window_layers: Optional[int] = None,
+         **kwargs,
+     ):
+         kwargs.setdefault("use_sliding_window", use_sliding_window)
+         kwargs.setdefault("sliding_window", sliding_window)
+         if max_window_layers is None:
+             max_window_layers = kwargs.get("num_hidden_layers", None)
+         kwargs.setdefault("max_window_layers", max_window_layers)
+         super().__init__(**kwargs)
+
+         self.text_config = Qwen2Config(**kwargs)
+
+         if audio_encoder_config is None:
+             self.audio_encoder_config = StepAudio2EncoderConfig()
+         elif isinstance(audio_encoder_config, dict):
+             self.audio_encoder_config = StepAudio2EncoderConfig(**audio_encoder_config)
+         else:
+             # Accept an already-constructed encoder config as well.
+             self.audio_encoder_config = audio_encoder_config
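As a quick sanity check (illustrative, not part of the commit; it assumes the file above is importable from the working directory), the config can be built directly. A dict is promoted to StepAudio2EncoderConfig, while Qwen2 text settings flow through **kwargs:

from configuration_step_audio_2 import StepAudio2Config

# Field values mirror config.json above; extra Qwen2 fields ride along in **kwargs.
cfg = StepAudio2Config(
    hidden_size=3584,
    num_hidden_layers=28,
    num_attention_heads=28,
    num_key_value_heads=4,
    vocab_size=158720,
    audio_encoder_config={"n_mels": 128, "n_audio_state": 1280, "llm_dim": 3584},
)

print(cfg.text_config.hidden_size)      # 3584, Qwen2Config built from **kwargs
print(cfg.audio_encoder_config.n_mels)  # 128, dict promoted to StepAudio2EncoderConfig
print(cfg.max_window_layers)            # 28, defaulted from num_hidden_layers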
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model-00001.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3ef7707a8393881c4f4faafd812d95d4b0653a02744fb0b5a580b3fea0390caf
+ size 9925030808
model-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:60d79d9b80ca1374614509a8d26dfbfbbb875fbcf192cdf7b3355fa21d3de69f
+ size 6705418376
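These three-line stubs are Git LFS pointer files: the repository stores only the sha256 oid and byte size, and git lfs pull fetches the real shards. A hedged sketch (file names assumed to match the pointers above) for verifying a downloaded shard against its pointer:

import hashlib

def verify_lfs_object(path: str, expected_oid: str, expected_size: int) -> bool:
    """Check a downloaded file against the oid/size recorded in its LFS pointer."""
    sha = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
            sha.update(chunk)
            size += len(chunk)
    return sha.hexdigest() == expected_oid and size == expected_size

ok = verify_lfs_object(
    "model-00001.safetensors",
    "3ef7707a8393881c4f4faafd812d95d4b0653a02744fb0b5a580b3fea0390caf",
    9925030808,
)
print("shard 1 intact:", ok)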
model.safetensors.index.json ADDED
@@ -0,0 +1 @@
+ {"metadata": {"total_size": 16630358528}, "weight_map": {"encoder.conv1.weight": "model-00001.safetensors", "encoder.conv1.bias": "model-00001.safetensors", "encoder.conv2.weight": "model-00001.safetensors", "encoder.conv2.bias": "model-00001.safetensors", "encoder.positional_embedding.weight": "model-00001.safetensors", "encoder.blocks.0.attn.query.weight": "model-00001.safetensors", "encoder.blocks.0.attn.query.bias": "model-00001.safetensors", "encoder.blocks.0.attn.key.weight": "model-00001.safetensors", "encoder.blocks.0.attn.value.weight": "model-00001.safetensors", "encoder.blocks.0.attn.value.bias": "model-00001.safetensors", "encoder.blocks.0.attn.out.weight": "model-00001.safetensors", "encoder.blocks.0.attn.out.bias": "model-00001.safetensors", "encoder.blocks.0.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.0.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.0.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.0.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.0.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.0.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.0.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.0.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.1.attn.query.weight": "model-00001.safetensors", "encoder.blocks.1.attn.query.bias": "model-00001.safetensors", "encoder.blocks.1.attn.key.weight": "model-00001.safetensors", "encoder.blocks.1.attn.value.weight": "model-00001.safetensors", "encoder.blocks.1.attn.value.bias": "model-00001.safetensors", "encoder.blocks.1.attn.out.weight": "model-00001.safetensors", "encoder.blocks.1.attn.out.bias": "model-00001.safetensors", "encoder.blocks.1.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.1.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.1.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.1.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.1.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.1.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.1.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.1.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.2.attn.query.weight": "model-00001.safetensors", "encoder.blocks.2.attn.query.bias": "model-00001.safetensors", "encoder.blocks.2.attn.key.weight": "model-00001.safetensors", "encoder.blocks.2.attn.value.weight": "model-00001.safetensors", "encoder.blocks.2.attn.value.bias": "model-00001.safetensors", "encoder.blocks.2.attn.out.weight": "model-00001.safetensors", "encoder.blocks.2.attn.out.bias": "model-00001.safetensors", "encoder.blocks.2.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.2.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.2.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.2.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.2.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.2.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.2.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.2.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.3.attn.query.weight": "model-00001.safetensors", "encoder.blocks.3.attn.query.bias": "model-00001.safetensors", "encoder.blocks.3.attn.key.weight": "model-00001.safetensors", "encoder.blocks.3.attn.value.weight": "model-00001.safetensors", "encoder.blocks.3.attn.value.bias": "model-00001.safetensors", "encoder.blocks.3.attn.out.weight": "model-00001.safetensors", "encoder.blocks.3.attn.out.bias": "model-00001.safetensors", 
"encoder.blocks.3.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.3.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.3.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.3.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.3.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.3.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.3.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.3.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.4.attn.query.weight": "model-00001.safetensors", "encoder.blocks.4.attn.query.bias": "model-00001.safetensors", "encoder.blocks.4.attn.key.weight": "model-00001.safetensors", "encoder.blocks.4.attn.value.weight": "model-00001.safetensors", "encoder.blocks.4.attn.value.bias": "model-00001.safetensors", "encoder.blocks.4.attn.out.weight": "model-00001.safetensors", "encoder.blocks.4.attn.out.bias": "model-00001.safetensors", "encoder.blocks.4.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.4.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.4.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.4.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.4.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.4.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.4.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.4.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.5.attn.query.weight": "model-00001.safetensors", "encoder.blocks.5.attn.query.bias": "model-00001.safetensors", "encoder.blocks.5.attn.key.weight": "model-00001.safetensors", "encoder.blocks.5.attn.value.weight": "model-00001.safetensors", "encoder.blocks.5.attn.value.bias": "model-00001.safetensors", "encoder.blocks.5.attn.out.weight": "model-00001.safetensors", "encoder.blocks.5.attn.out.bias": "model-00001.safetensors", "encoder.blocks.5.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.5.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.5.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.5.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.5.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.5.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.5.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.5.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.6.attn.query.weight": "model-00001.safetensors", "encoder.blocks.6.attn.query.bias": "model-00001.safetensors", "encoder.blocks.6.attn.key.weight": "model-00001.safetensors", "encoder.blocks.6.attn.value.weight": "model-00001.safetensors", "encoder.blocks.6.attn.value.bias": "model-00001.safetensors", "encoder.blocks.6.attn.out.weight": "model-00001.safetensors", "encoder.blocks.6.attn.out.bias": "model-00001.safetensors", "encoder.blocks.6.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.6.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.6.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.6.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.6.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.6.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.6.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.6.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.7.attn.query.weight": "model-00001.safetensors", "encoder.blocks.7.attn.query.bias": "model-00001.safetensors", "encoder.blocks.7.attn.key.weight": "model-00001.safetensors", "encoder.blocks.7.attn.value.weight": "model-00001.safetensors", "encoder.blocks.7.attn.value.bias": 
"model-00001.safetensors", "encoder.blocks.7.attn.out.weight": "model-00001.safetensors", "encoder.blocks.7.attn.out.bias": "model-00001.safetensors", "encoder.blocks.7.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.7.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.7.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.7.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.7.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.7.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.7.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.7.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.8.attn.query.weight": "model-00001.safetensors", "encoder.blocks.8.attn.query.bias": "model-00001.safetensors", "encoder.blocks.8.attn.key.weight": "model-00001.safetensors", "encoder.blocks.8.attn.value.weight": "model-00001.safetensors", "encoder.blocks.8.attn.value.bias": "model-00001.safetensors", "encoder.blocks.8.attn.out.weight": "model-00001.safetensors", "encoder.blocks.8.attn.out.bias": "model-00001.safetensors", "encoder.blocks.8.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.8.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.8.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.8.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.8.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.8.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.8.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.8.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.9.attn.query.weight": "model-00001.safetensors", "encoder.blocks.9.attn.query.bias": "model-00001.safetensors", "encoder.blocks.9.attn.key.weight": "model-00001.safetensors", "encoder.blocks.9.attn.value.weight": "model-00001.safetensors", "encoder.blocks.9.attn.value.bias": "model-00001.safetensors", "encoder.blocks.9.attn.out.weight": "model-00001.safetensors", "encoder.blocks.9.attn.out.bias": "model-00001.safetensors", "encoder.blocks.9.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.9.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.9.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.9.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.9.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.9.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.9.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.9.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.10.attn.query.weight": "model-00001.safetensors", "encoder.blocks.10.attn.query.bias": "model-00001.safetensors", "encoder.blocks.10.attn.key.weight": "model-00001.safetensors", "encoder.blocks.10.attn.value.weight": "model-00001.safetensors", "encoder.blocks.10.attn.value.bias": "model-00001.safetensors", "encoder.blocks.10.attn.out.weight": "model-00001.safetensors", "encoder.blocks.10.attn.out.bias": "model-00001.safetensors", "encoder.blocks.10.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.10.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.10.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.10.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.10.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.10.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.10.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.10.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.11.attn.query.weight": "model-00001.safetensors", "encoder.blocks.11.attn.query.bias": "model-00001.safetensors", 
"encoder.blocks.11.attn.key.weight": "model-00001.safetensors", "encoder.blocks.11.attn.value.weight": "model-00001.safetensors", "encoder.blocks.11.attn.value.bias": "model-00001.safetensors", "encoder.blocks.11.attn.out.weight": "model-00001.safetensors", "encoder.blocks.11.attn.out.bias": "model-00001.safetensors", "encoder.blocks.11.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.11.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.11.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.11.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.11.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.11.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.11.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.11.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.12.attn.query.weight": "model-00001.safetensors", "encoder.blocks.12.attn.query.bias": "model-00001.safetensors", "encoder.blocks.12.attn.key.weight": "model-00001.safetensors", "encoder.blocks.12.attn.value.weight": "model-00001.safetensors", "encoder.blocks.12.attn.value.bias": "model-00001.safetensors", "encoder.blocks.12.attn.out.weight": "model-00001.safetensors", "encoder.blocks.12.attn.out.bias": "model-00001.safetensors", "encoder.blocks.12.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.12.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.12.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.12.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.12.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.12.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.12.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.12.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.13.attn.query.weight": "model-00001.safetensors", "encoder.blocks.13.attn.query.bias": "model-00001.safetensors", "encoder.blocks.13.attn.key.weight": "model-00001.safetensors", "encoder.blocks.13.attn.value.weight": "model-00001.safetensors", "encoder.blocks.13.attn.value.bias": "model-00001.safetensors", "encoder.blocks.13.attn.out.weight": "model-00001.safetensors", "encoder.blocks.13.attn.out.bias": "model-00001.safetensors", "encoder.blocks.13.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.13.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.13.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.13.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.13.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.13.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.13.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.13.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.14.attn.query.weight": "model-00001.safetensors", "encoder.blocks.14.attn.query.bias": "model-00001.safetensors", "encoder.blocks.14.attn.key.weight": "model-00001.safetensors", "encoder.blocks.14.attn.value.weight": "model-00001.safetensors", "encoder.blocks.14.attn.value.bias": "model-00001.safetensors", "encoder.blocks.14.attn.out.weight": "model-00001.safetensors", "encoder.blocks.14.attn.out.bias": "model-00001.safetensors", "encoder.blocks.14.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.14.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.14.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.14.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.14.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.14.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.14.mlp_ln.weight": "model-00001.safetensors", 
"encoder.blocks.14.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.15.attn.query.weight": "model-00001.safetensors", "encoder.blocks.15.attn.query.bias": "model-00001.safetensors", "encoder.blocks.15.attn.key.weight": "model-00001.safetensors", "encoder.blocks.15.attn.value.weight": "model-00001.safetensors", "encoder.blocks.15.attn.value.bias": "model-00001.safetensors", "encoder.blocks.15.attn.out.weight": "model-00001.safetensors", "encoder.blocks.15.attn.out.bias": "model-00001.safetensors", "encoder.blocks.15.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.15.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.15.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.15.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.15.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.15.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.15.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.15.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.16.attn.query.weight": "model-00001.safetensors", "encoder.blocks.16.attn.query.bias": "model-00001.safetensors", "encoder.blocks.16.attn.key.weight": "model-00001.safetensors", "encoder.blocks.16.attn.value.weight": "model-00001.safetensors", "encoder.blocks.16.attn.value.bias": "model-00001.safetensors", "encoder.blocks.16.attn.out.weight": "model-00001.safetensors", "encoder.blocks.16.attn.out.bias": "model-00001.safetensors", "encoder.blocks.16.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.16.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.16.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.16.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.16.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.16.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.16.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.16.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.17.attn.query.weight": "model-00001.safetensors", "encoder.blocks.17.attn.query.bias": "model-00001.safetensors", "encoder.blocks.17.attn.key.weight": "model-00001.safetensors", "encoder.blocks.17.attn.value.weight": "model-00001.safetensors", "encoder.blocks.17.attn.value.bias": "model-00001.safetensors", "encoder.blocks.17.attn.out.weight": "model-00001.safetensors", "encoder.blocks.17.attn.out.bias": "model-00001.safetensors", "encoder.blocks.17.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.17.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.17.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.17.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.17.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.17.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.17.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.17.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.18.attn.query.weight": "model-00001.safetensors", "encoder.blocks.18.attn.query.bias": "model-00001.safetensors", "encoder.blocks.18.attn.key.weight": "model-00001.safetensors", "encoder.blocks.18.attn.value.weight": "model-00001.safetensors", "encoder.blocks.18.attn.value.bias": "model-00001.safetensors", "encoder.blocks.18.attn.out.weight": "model-00001.safetensors", "encoder.blocks.18.attn.out.bias": "model-00001.safetensors", "encoder.blocks.18.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.18.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.18.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.18.mlp.0.bias": 
"model-00001.safetensors", "encoder.blocks.18.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.18.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.18.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.18.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.19.attn.query.weight": "model-00001.safetensors", "encoder.blocks.19.attn.query.bias": "model-00001.safetensors", "encoder.blocks.19.attn.key.weight": "model-00001.safetensors", "encoder.blocks.19.attn.value.weight": "model-00001.safetensors", "encoder.blocks.19.attn.value.bias": "model-00001.safetensors", "encoder.blocks.19.attn.out.weight": "model-00001.safetensors", "encoder.blocks.19.attn.out.bias": "model-00001.safetensors", "encoder.blocks.19.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.19.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.19.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.19.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.19.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.19.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.19.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.19.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.20.attn.query.weight": "model-00001.safetensors", "encoder.blocks.20.attn.query.bias": "model-00001.safetensors", "encoder.blocks.20.attn.key.weight": "model-00001.safetensors", "encoder.blocks.20.attn.value.weight": "model-00001.safetensors", "encoder.blocks.20.attn.value.bias": "model-00001.safetensors", "encoder.blocks.20.attn.out.weight": "model-00001.safetensors", "encoder.blocks.20.attn.out.bias": "model-00001.safetensors", "encoder.blocks.20.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.20.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.20.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.20.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.20.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.20.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.20.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.20.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.21.attn.query.weight": "model-00001.safetensors", "encoder.blocks.21.attn.query.bias": "model-00001.safetensors", "encoder.blocks.21.attn.key.weight": "model-00001.safetensors", "encoder.blocks.21.attn.value.weight": "model-00001.safetensors", "encoder.blocks.21.attn.value.bias": "model-00001.safetensors", "encoder.blocks.21.attn.out.weight": "model-00001.safetensors", "encoder.blocks.21.attn.out.bias": "model-00001.safetensors", "encoder.blocks.21.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.21.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.21.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.21.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.21.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.21.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.21.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.21.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.22.attn.query.weight": "model-00001.safetensors", "encoder.blocks.22.attn.query.bias": "model-00001.safetensors", "encoder.blocks.22.attn.key.weight": "model-00001.safetensors", "encoder.blocks.22.attn.value.weight": "model-00001.safetensors", "encoder.blocks.22.attn.value.bias": "model-00001.safetensors", "encoder.blocks.22.attn.out.weight": "model-00001.safetensors", "encoder.blocks.22.attn.out.bias": "model-00001.safetensors", 
"encoder.blocks.22.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.22.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.22.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.22.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.22.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.22.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.22.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.22.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.23.attn.query.weight": "model-00001.safetensors", "encoder.blocks.23.attn.query.bias": "model-00001.safetensors", "encoder.blocks.23.attn.key.weight": "model-00001.safetensors", "encoder.blocks.23.attn.value.weight": "model-00001.safetensors", "encoder.blocks.23.attn.value.bias": "model-00001.safetensors", "encoder.blocks.23.attn.out.weight": "model-00001.safetensors", "encoder.blocks.23.attn.out.bias": "model-00001.safetensors", "encoder.blocks.23.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.23.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.23.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.23.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.23.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.23.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.23.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.23.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.24.attn.query.weight": "model-00001.safetensors", "encoder.blocks.24.attn.query.bias": "model-00001.safetensors", "encoder.blocks.24.attn.key.weight": "model-00001.safetensors", "encoder.blocks.24.attn.value.weight": "model-00001.safetensors", "encoder.blocks.24.attn.value.bias": "model-00001.safetensors", "encoder.blocks.24.attn.out.weight": "model-00001.safetensors", "encoder.blocks.24.attn.out.bias": "model-00001.safetensors", "encoder.blocks.24.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.24.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.24.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.24.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.24.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.24.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.24.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.24.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.25.attn.query.weight": "model-00001.safetensors", "encoder.blocks.25.attn.query.bias": "model-00001.safetensors", "encoder.blocks.25.attn.key.weight": "model-00001.safetensors", "encoder.blocks.25.attn.value.weight": "model-00001.safetensors", "encoder.blocks.25.attn.value.bias": "model-00001.safetensors", "encoder.blocks.25.attn.out.weight": "model-00001.safetensors", "encoder.blocks.25.attn.out.bias": "model-00001.safetensors", "encoder.blocks.25.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.25.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.25.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.25.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.25.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.25.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.25.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.25.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.26.attn.query.weight": "model-00001.safetensors", "encoder.blocks.26.attn.query.bias": "model-00001.safetensors", "encoder.blocks.26.attn.key.weight": "model-00001.safetensors", "encoder.blocks.26.attn.value.weight": "model-00001.safetensors", 
"encoder.blocks.26.attn.value.bias": "model-00001.safetensors", "encoder.blocks.26.attn.out.weight": "model-00001.safetensors", "encoder.blocks.26.attn.out.bias": "model-00001.safetensors", "encoder.blocks.26.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.26.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.26.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.26.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.26.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.26.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.26.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.26.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.27.attn.query.weight": "model-00001.safetensors", "encoder.blocks.27.attn.query.bias": "model-00001.safetensors", "encoder.blocks.27.attn.key.weight": "model-00001.safetensors", "encoder.blocks.27.attn.value.weight": "model-00001.safetensors", "encoder.blocks.27.attn.value.bias": "model-00001.safetensors", "encoder.blocks.27.attn.out.weight": "model-00001.safetensors", "encoder.blocks.27.attn.out.bias": "model-00001.safetensors", "encoder.blocks.27.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.27.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.27.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.27.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.27.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.27.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.27.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.27.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.28.attn.query.weight": "model-00001.safetensors", "encoder.blocks.28.attn.query.bias": "model-00001.safetensors", "encoder.blocks.28.attn.key.weight": "model-00001.safetensors", "encoder.blocks.28.attn.value.weight": "model-00001.safetensors", "encoder.blocks.28.attn.value.bias": "model-00001.safetensors", "encoder.blocks.28.attn.out.weight": "model-00001.safetensors", "encoder.blocks.28.attn.out.bias": "model-00001.safetensors", "encoder.blocks.28.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.28.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.28.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.28.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.28.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.28.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.28.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.28.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.29.attn.query.weight": "model-00001.safetensors", "encoder.blocks.29.attn.query.bias": "model-00001.safetensors", "encoder.blocks.29.attn.key.weight": "model-00001.safetensors", "encoder.blocks.29.attn.value.weight": "model-00001.safetensors", "encoder.blocks.29.attn.value.bias": "model-00001.safetensors", "encoder.blocks.29.attn.out.weight": "model-00001.safetensors", "encoder.blocks.29.attn.out.bias": "model-00001.safetensors", "encoder.blocks.29.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.29.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.29.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.29.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.29.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.29.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.29.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.29.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.30.attn.query.weight": "model-00001.safetensors", 
"encoder.blocks.30.attn.query.bias": "model-00001.safetensors", "encoder.blocks.30.attn.key.weight": "model-00001.safetensors", "encoder.blocks.30.attn.value.weight": "model-00001.safetensors", "encoder.blocks.30.attn.value.bias": "model-00001.safetensors", "encoder.blocks.30.attn.out.weight": "model-00001.safetensors", "encoder.blocks.30.attn.out.bias": "model-00001.safetensors", "encoder.blocks.30.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.30.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.30.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.30.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.30.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.30.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.30.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.30.mlp_ln.bias": "model-00001.safetensors", "encoder.blocks.31.attn.query.weight": "model-00001.safetensors", "encoder.blocks.31.attn.query.bias": "model-00001.safetensors", "encoder.blocks.31.attn.key.weight": "model-00001.safetensors", "encoder.blocks.31.attn.value.weight": "model-00001.safetensors", "encoder.blocks.31.attn.value.bias": "model-00001.safetensors", "encoder.blocks.31.attn.out.weight": "model-00001.safetensors", "encoder.blocks.31.attn.out.bias": "model-00001.safetensors", "encoder.blocks.31.attn_ln.weight": "model-00001.safetensors", "encoder.blocks.31.attn_ln.bias": "model-00001.safetensors", "encoder.blocks.31.mlp.0.weight": "model-00001.safetensors", "encoder.blocks.31.mlp.0.bias": "model-00001.safetensors", "encoder.blocks.31.mlp.2.weight": "model-00001.safetensors", "encoder.blocks.31.mlp.2.bias": "model-00001.safetensors", "encoder.blocks.31.mlp_ln.weight": "model-00001.safetensors", "encoder.blocks.31.mlp_ln.bias": "model-00001.safetensors", "encoder.after_norm.weight": "model-00001.safetensors", "encoder.after_norm.bias": "model-00001.safetensors", "adapter.conv.weight": "model-00001.safetensors", "adapter.conv.bias": "model-00001.safetensors", "adapter.linear1.weight": "model-00001.safetensors", "adapter.linear1.bias": "model-00001.safetensors", "adapter.linear2.weight": "model-00001.safetensors", "adapter.linear2.bias": "model-00001.safetensors", "model.embed_tokens.weight": "model-00001.safetensors", "model.layers.0.self_attn.o_proj.weight": "model-00001.safetensors", "model.layers.0.input_layernorm.weight": "model-00001.safetensors", "model.layers.0.mlp.down_proj.weight": "model-00001.safetensors", "model.layers.0.post_attention_layernorm.weight": "model-00001.safetensors", "model.layers.0.self_attn.q_proj.weight": "model-00001.safetensors", "model.layers.0.self_attn.k_proj.weight": "model-00001.safetensors", "model.layers.0.self_attn.v_proj.weight": "model-00001.safetensors", "model.layers.0.self_attn.q_proj.bias": "model-00001.safetensors", "model.layers.0.self_attn.k_proj.bias": "model-00001.safetensors", "model.layers.0.self_attn.v_proj.bias": "model-00001.safetensors", "model.layers.0.mlp.gate_proj.weight": "model-00001.safetensors", "model.layers.0.mlp.up_proj.weight": "model-00001.safetensors", "model.layers.1.self_attn.o_proj.weight": "model-00001.safetensors", "model.layers.1.input_layernorm.weight": "model-00001.safetensors", "model.layers.1.mlp.down_proj.weight": "model-00001.safetensors", "model.layers.1.post_attention_layernorm.weight": "model-00001.safetensors", "model.layers.1.self_attn.q_proj.weight": "model-00001.safetensors", "model.layers.1.self_attn.k_proj.weight": "model-00001.safetensors", 
"model.layers.1.self_attn.v_proj.weight": "model-00001.safetensors", "model.layers.1.self_attn.q_proj.bias": "model-00001.safetensors", "model.layers.1.self_attn.k_proj.bias": "model-00001.safetensors", "model.layers.1.self_attn.v_proj.bias": "model-00001.safetensors", "model.layers.1.mlp.gate_proj.weight": "model-00001.safetensors", "model.layers.1.mlp.up_proj.weight": "model-00001.safetensors", "model.layers.2.self_attn.o_proj.weight": "model-00001.safetensors", "model.layers.2.input_layernorm.weight": "model-00001.safetensors", "model.layers.2.mlp.down_proj.weight": "model-00001.safetensors", "model.layers.2.post_attention_layernorm.weight": "model-00001.safetensors", "model.layers.2.self_attn.q_proj.weight": "model-00001.safetensors", "model.layers.2.self_attn.k_proj.weight": "model-00001.safetensors", "model.layers.2.self_attn.v_proj.weight": "model-00001.safetensors", "model.layers.2.self_attn.q_proj.bias": "model-00001.safetensors", "model.layers.2.self_attn.k_proj.bias": "model-00001.safetensors", "model.layers.2.self_attn.v_proj.bias": "model-00001.safetensors", "model.layers.2.mlp.gate_proj.weight": "model-00001.safetensors", "model.layers.2.mlp.up_proj.weight": "model-00001.safetensors", "model.layers.3.self_attn.o_proj.weight": "model-00001.safetensors", "model.layers.3.input_layernorm.weight": "model-00001.safetensors", "model.layers.3.mlp.down_proj.weight": "model-00001.safetensors", "model.layers.3.post_attention_layernorm.weight": "model-00001.safetensors", "model.layers.3.self_attn.q_proj.weight": "model-00001.safetensors", "model.layers.3.self_attn.k_proj.weight": "model-00001.safetensors", "model.layers.3.self_attn.v_proj.weight": "model-00001.safetensors", "model.layers.3.self_attn.q_proj.bias": "model-00001.safetensors", "model.layers.3.self_attn.k_proj.bias": "model-00001.safetensors", "model.layers.3.self_attn.v_proj.bias": "model-00001.safetensors", "model.layers.3.mlp.gate_proj.weight": "model-00001.safetensors", "model.layers.3.mlp.up_proj.weight": "model-00001.safetensors", "model.layers.4.self_attn.o_proj.weight": "model-00001.safetensors", "model.layers.4.input_layernorm.weight": "model-00001.safetensors", "model.layers.4.mlp.down_proj.weight": "model-00001.safetensors", "model.layers.4.post_attention_layernorm.weight": "model-00001.safetensors", "model.layers.4.self_attn.q_proj.weight": "model-00001.safetensors", "model.layers.4.self_attn.k_proj.weight": "model-00001.safetensors", "model.layers.4.self_attn.v_proj.weight": "model-00001.safetensors", "model.layers.4.self_attn.q_proj.bias": "model-00001.safetensors", "model.layers.4.self_attn.k_proj.bias": "model-00001.safetensors", "model.layers.4.self_attn.v_proj.bias": "model-00001.safetensors", "model.layers.4.mlp.gate_proj.weight": "model-00001.safetensors", "model.layers.4.mlp.up_proj.weight": "model-00001.safetensors", "model.layers.5.self_attn.o_proj.weight": "model-00001.safetensors", "model.layers.5.input_layernorm.weight": "model-00001.safetensors", "model.layers.5.mlp.down_proj.weight": "model-00001.safetensors", "model.layers.5.post_attention_layernorm.weight": "model-00001.safetensors", "model.layers.5.self_attn.q_proj.weight": "model-00001.safetensors", "model.layers.5.self_attn.k_proj.weight": "model-00001.safetensors", "model.layers.5.self_attn.v_proj.weight": "model-00001.safetensors", "model.layers.5.self_attn.q_proj.bias": "model-00001.safetensors", "model.layers.5.self_attn.k_proj.bias": "model-00001.safetensors", "model.layers.5.self_attn.v_proj.bias": "model-00001.safetensors", 
"model.layers.5.mlp.gate_proj.weight": "model-00001.safetensors", "model.layers.5.mlp.up_proj.weight": "model-00001.safetensors", "model.layers.6.self_attn.o_proj.weight": "model-00001.safetensors", "model.layers.6.input_layernorm.weight": "model-00001.safetensors", "model.layers.6.mlp.down_proj.weight": "model-00001.safetensors", "model.layers.6.post_attention_layernorm.weight": "model-00001.safetensors", "model.layers.6.self_attn.q_proj.weight": "model-00001.safetensors", "model.layers.6.self_attn.k_proj.weight": "model-00001.safetensors", "model.layers.6.self_attn.v_proj.weight": "model-00001.safetensors", "model.layers.6.self_attn.q_proj.bias": "model-00001.safetensors", "model.layers.6.self_attn.k_proj.bias": "model-00001.safetensors", "model.layers.6.self_attn.v_proj.bias": "model-00001.safetensors", "model.layers.6.mlp.gate_proj.weight": "model-00001.safetensors", "model.layers.6.mlp.up_proj.weight": "model-00001.safetensors", "model.layers.7.self_attn.o_proj.weight": "model-00001.safetensors", "model.layers.7.input_layernorm.weight": "model-00001.safetensors", "model.layers.7.mlp.down_proj.weight": "model-00001.safetensors", "model.layers.7.post_attention_layernorm.weight": "model-00001.safetensors", "model.layers.7.self_attn.q_proj.weight": "model-00001.safetensors", "model.layers.7.self_attn.k_proj.weight": "model-00001.safetensors", "model.layers.7.self_attn.v_proj.weight": "model-00001.safetensors", "model.layers.7.self_attn.q_proj.bias": "model-00001.safetensors", "model.layers.7.self_attn.k_proj.bias": "model-00001.safetensors", "model.layers.7.self_attn.v_proj.bias": "model-00001.safetensors", "model.layers.7.mlp.gate_proj.weight": "model-00001.safetensors", "model.layers.7.mlp.up_proj.weight": "model-00001.safetensors", "model.layers.8.self_attn.o_proj.weight": "model-00001.safetensors", "model.layers.8.input_layernorm.weight": "model-00001.safetensors", "model.layers.8.mlp.down_proj.weight": "model-00001.safetensors", "model.layers.8.post_attention_layernorm.weight": "model-00001.safetensors", "model.layers.8.self_attn.q_proj.weight": "model-00001.safetensors", "model.layers.8.self_attn.k_proj.weight": "model-00001.safetensors", "model.layers.8.self_attn.v_proj.weight": "model-00001.safetensors", "model.layers.8.self_attn.q_proj.bias": "model-00001.safetensors", "model.layers.8.self_attn.k_proj.bias": "model-00001.safetensors", "model.layers.8.self_attn.v_proj.bias": "model-00001.safetensors", "model.layers.8.mlp.gate_proj.weight": "model-00001.safetensors", "model.layers.8.mlp.up_proj.weight": "model-00001.safetensors", "model.layers.9.self_attn.o_proj.weight": "model-00001.safetensors", "model.layers.9.input_layernorm.weight": "model-00001.safetensors", "model.layers.9.mlp.down_proj.weight": "model-00001.safetensors", "model.layers.9.post_attention_layernorm.weight": "model-00001.safetensors", "model.layers.9.self_attn.q_proj.weight": "model-00001.safetensors", "model.layers.9.self_attn.k_proj.weight": "model-00001.safetensors", "model.layers.9.self_attn.v_proj.weight": "model-00001.safetensors", "model.layers.9.self_attn.q_proj.bias": "model-00001.safetensors", "model.layers.9.self_attn.k_proj.bias": "model-00001.safetensors", "model.layers.9.self_attn.v_proj.bias": "model-00001.safetensors", "model.layers.9.mlp.gate_proj.weight": "model-00001.safetensors", "model.layers.9.mlp.up_proj.weight": "model-00001.safetensors", "model.layers.10.self_attn.o_proj.weight": "model-00001.safetensors", "model.layers.10.input_layernorm.weight": "model-00001.safetensors", 
"model.layers.10.mlp.down_proj.weight": "model-00001.safetensors", "model.layers.10.post_attention_layernorm.weight": "model-00001.safetensors", "model.layers.10.self_attn.q_proj.weight": "model-00001.safetensors", "model.layers.10.self_attn.k_proj.weight": "model-00001.safetensors", "model.layers.10.self_attn.v_proj.weight": "model-00001.safetensors", "model.layers.10.self_attn.q_proj.bias": "model-00001.safetensors", "model.layers.10.self_attn.k_proj.bias": "model-00001.safetensors", "model.layers.10.self_attn.v_proj.bias": "model-00001.safetensors", "model.layers.10.mlp.gate_proj.weight": "model-00001.safetensors", "model.layers.10.mlp.up_proj.weight": "model-00001.safetensors", "model.layers.11.self_attn.o_proj.weight": "model-00001.safetensors", "model.layers.11.input_layernorm.weight": "model-00001.safetensors", "model.layers.11.mlp.down_proj.weight": "model-00001.safetensors", "model.layers.11.post_attention_layernorm.weight": "model-00001.safetensors", "model.layers.11.self_attn.q_proj.weight": "model-00001.safetensors", "model.layers.11.self_attn.k_proj.weight": "model-00001.safetensors", "model.layers.11.self_attn.v_proj.weight": "model-00001.safetensors", "model.layers.11.self_attn.q_proj.bias": "model-00001.safetensors", "model.layers.11.self_attn.k_proj.bias": "model-00001.safetensors", "model.layers.11.self_attn.v_proj.bias": "model-00001.safetensors", "model.layers.11.mlp.gate_proj.weight": "model-00001.safetensors", "model.layers.11.mlp.up_proj.weight": "model-00001.safetensors", "model.layers.12.self_attn.o_proj.weight": "model-00001.safetensors", "model.layers.12.input_layernorm.weight": "model-00001.safetensors", "model.layers.12.mlp.down_proj.weight": "model-00001.safetensors", "model.layers.12.post_attention_layernorm.weight": "model-00001.safetensors", "model.layers.12.self_attn.q_proj.weight": "model-00001.safetensors", "model.layers.12.self_attn.k_proj.weight": "model-00001.safetensors", "model.layers.12.self_attn.v_proj.weight": "model-00001.safetensors", "model.layers.12.self_attn.q_proj.bias": "model-00001.safetensors", "model.layers.12.self_attn.k_proj.bias": "model-00001.safetensors", "model.layers.12.self_attn.v_proj.bias": "model-00001.safetensors", "model.layers.12.mlp.gate_proj.weight": "model-00001.safetensors", "model.layers.12.mlp.up_proj.weight": "model-00001.safetensors", "model.layers.13.self_attn.o_proj.weight": "model-00001.safetensors", "model.layers.13.input_layernorm.weight": "model-00001.safetensors", "model.layers.13.mlp.down_proj.weight": "model-00001.safetensors", "model.layers.13.post_attention_layernorm.weight": "model-00001.safetensors", "model.layers.13.self_attn.q_proj.weight": "model-00001.safetensors", "model.layers.13.self_attn.k_proj.weight": "model-00001.safetensors", "model.layers.13.self_attn.v_proj.weight": "model-00001.safetensors", "model.layers.13.self_attn.q_proj.bias": "model-00001.safetensors", "model.layers.13.self_attn.k_proj.bias": "model-00001.safetensors", "model.layers.13.self_attn.v_proj.bias": "model-00001.safetensors", "model.layers.13.mlp.gate_proj.weight": "model-00001.safetensors", "model.layers.13.mlp.up_proj.weight": "model-00001.safetensors", "model.layers.14.self_attn.o_proj.weight": "model-00001.safetensors", "model.layers.14.input_layernorm.weight": "model-00001.safetensors", "model.layers.14.mlp.down_proj.weight": "model-00001.safetensors", "model.layers.14.post_attention_layernorm.weight": "model-00001.safetensors", "model.layers.14.self_attn.q_proj.weight": "model-00001.safetensors", 
"model.layers.14.self_attn.k_proj.weight": "model-00001.safetensors", "model.layers.14.self_attn.v_proj.weight": "model-00001.safetensors", "model.layers.14.self_attn.q_proj.bias": "model-00001.safetensors", "model.layers.14.self_attn.k_proj.bias": "model-00001.safetensors", "model.layers.14.self_attn.v_proj.bias": "model-00001.safetensors", "model.layers.14.mlp.gate_proj.weight": "model-00001.safetensors", "model.layers.14.mlp.up_proj.weight": "model-00001.safetensors", "model.layers.15.self_attn.o_proj.weight": "model-00001.safetensors", "model.layers.15.input_layernorm.weight": "model-00001.safetensors", "model.layers.15.mlp.down_proj.weight": "model-00001.safetensors", "model.layers.15.post_attention_layernorm.weight": "model-00001.safetensors", "model.layers.15.self_attn.q_proj.weight": "model-00001.safetensors", "model.layers.15.self_attn.k_proj.weight": "model-00001.safetensors", "model.layers.15.self_attn.v_proj.weight": "model-00001.safetensors", "model.layers.15.self_attn.q_proj.bias": "model-00001.safetensors", "model.layers.15.self_attn.k_proj.bias": "model-00001.safetensors", "model.layers.15.self_attn.v_proj.bias": "model-00001.safetensors", "model.layers.15.mlp.gate_proj.weight": "model-00001.safetensors", "model.layers.15.mlp.up_proj.weight": "model-00001.safetensors", "model.layers.16.self_attn.o_proj.weight": "model-00001.safetensors", "model.layers.16.input_layernorm.weight": "model-00001.safetensors", "model.layers.16.mlp.down_proj.weight": "model-00002.safetensors", "model.layers.16.post_attention_layernorm.weight": "model-00002.safetensors", "model.layers.16.self_attn.q_proj.weight": "model-00002.safetensors", "model.layers.16.self_attn.k_proj.weight": "model-00002.safetensors", "model.layers.16.self_attn.v_proj.weight": "model-00002.safetensors", "model.layers.16.self_attn.q_proj.bias": "model-00002.safetensors", "model.layers.16.self_attn.k_proj.bias": "model-00002.safetensors", "model.layers.16.self_attn.v_proj.bias": "model-00002.safetensors", "model.layers.16.mlp.gate_proj.weight": "model-00002.safetensors", "model.layers.16.mlp.up_proj.weight": "model-00002.safetensors", "model.layers.17.self_attn.o_proj.weight": "model-00002.safetensors", "model.layers.17.input_layernorm.weight": "model-00002.safetensors", "model.layers.17.mlp.down_proj.weight": "model-00002.safetensors", "model.layers.17.post_attention_layernorm.weight": "model-00002.safetensors", "model.layers.17.self_attn.q_proj.weight": "model-00002.safetensors", "model.layers.17.self_attn.k_proj.weight": "model-00002.safetensors", "model.layers.17.self_attn.v_proj.weight": "model-00002.safetensors", "model.layers.17.self_attn.q_proj.bias": "model-00002.safetensors", "model.layers.17.self_attn.k_proj.bias": "model-00002.safetensors", "model.layers.17.self_attn.v_proj.bias": "model-00002.safetensors", "model.layers.17.mlp.gate_proj.weight": "model-00002.safetensors", "model.layers.17.mlp.up_proj.weight": "model-00002.safetensors", "model.layers.18.self_attn.o_proj.weight": "model-00002.safetensors", "model.layers.18.input_layernorm.weight": "model-00002.safetensors", "model.layers.18.mlp.down_proj.weight": "model-00002.safetensors", "model.layers.18.post_attention_layernorm.weight": "model-00002.safetensors", "model.layers.18.self_attn.q_proj.weight": "model-00002.safetensors", "model.layers.18.self_attn.k_proj.weight": "model-00002.safetensors", "model.layers.18.self_attn.v_proj.weight": "model-00002.safetensors", "model.layers.18.self_attn.q_proj.bias": "model-00002.safetensors", 
"model.layers.18.self_attn.k_proj.bias": "model-00002.safetensors", "model.layers.18.self_attn.v_proj.bias": "model-00002.safetensors", "model.layers.18.mlp.gate_proj.weight": "model-00002.safetensors", "model.layers.18.mlp.up_proj.weight": "model-00002.safetensors", "model.layers.19.self_attn.o_proj.weight": "model-00002.safetensors", "model.layers.19.input_layernorm.weight": "model-00002.safetensors", "model.layers.19.mlp.down_proj.weight": "model-00002.safetensors", "model.layers.19.post_attention_layernorm.weight": "model-00002.safetensors", "model.layers.19.self_attn.q_proj.weight": "model-00002.safetensors", "model.layers.19.self_attn.k_proj.weight": "model-00002.safetensors", "model.layers.19.self_attn.v_proj.weight": "model-00002.safetensors", "model.layers.19.self_attn.q_proj.bias": "model-00002.safetensors", "model.layers.19.self_attn.k_proj.bias": "model-00002.safetensors", "model.layers.19.self_attn.v_proj.bias": "model-00002.safetensors", "model.layers.19.mlp.gate_proj.weight": "model-00002.safetensors", "model.layers.19.mlp.up_proj.weight": "model-00002.safetensors", "model.layers.20.self_attn.o_proj.weight": "model-00002.safetensors", "model.layers.20.input_layernorm.weight": "model-00002.safetensors", "model.layers.20.mlp.down_proj.weight": "model-00002.safetensors", "model.layers.20.post_attention_layernorm.weight": "model-00002.safetensors", "model.layers.20.self_attn.q_proj.weight": "model-00002.safetensors", "model.layers.20.self_attn.k_proj.weight": "model-00002.safetensors", "model.layers.20.self_attn.v_proj.weight": "model-00002.safetensors", "model.layers.20.self_attn.q_proj.bias": "model-00002.safetensors", "model.layers.20.self_attn.k_proj.bias": "model-00002.safetensors", "model.layers.20.self_attn.v_proj.bias": "model-00002.safetensors", "model.layers.20.mlp.gate_proj.weight": "model-00002.safetensors", "model.layers.20.mlp.up_proj.weight": "model-00002.safetensors", "model.layers.21.self_attn.o_proj.weight": "model-00002.safetensors", "model.layers.21.input_layernorm.weight": "model-00002.safetensors", "model.layers.21.mlp.down_proj.weight": "model-00002.safetensors", "model.layers.21.post_attention_layernorm.weight": "model-00002.safetensors", "model.layers.21.self_attn.q_proj.weight": "model-00002.safetensors", "model.layers.21.self_attn.k_proj.weight": "model-00002.safetensors", "model.layers.21.self_attn.v_proj.weight": "model-00002.safetensors", "model.layers.21.self_attn.q_proj.bias": "model-00002.safetensors", "model.layers.21.self_attn.k_proj.bias": "model-00002.safetensors", "model.layers.21.self_attn.v_proj.bias": "model-00002.safetensors", "model.layers.21.mlp.gate_proj.weight": "model-00002.safetensors", "model.layers.21.mlp.up_proj.weight": "model-00002.safetensors", "model.layers.22.self_attn.o_proj.weight": "model-00002.safetensors", "model.layers.22.input_layernorm.weight": "model-00002.safetensors", "model.layers.22.mlp.down_proj.weight": "model-00002.safetensors", "model.layers.22.post_attention_layernorm.weight": "model-00002.safetensors", "model.layers.22.self_attn.q_proj.weight": "model-00002.safetensors", "model.layers.22.self_attn.k_proj.weight": "model-00002.safetensors", "model.layers.22.self_attn.v_proj.weight": "model-00002.safetensors", "model.layers.22.self_attn.q_proj.bias": "model-00002.safetensors", "model.layers.22.self_attn.k_proj.bias": "model-00002.safetensors", "model.layers.22.self_attn.v_proj.bias": "model-00002.safetensors", "model.layers.22.mlp.gate_proj.weight": "model-00002.safetensors", 
"model.layers.22.mlp.up_proj.weight": "model-00002.safetensors", "model.layers.23.self_attn.o_proj.weight": "model-00002.safetensors", "model.layers.23.input_layernorm.weight": "model-00002.safetensors", "model.layers.23.mlp.down_proj.weight": "model-00002.safetensors", "model.layers.23.post_attention_layernorm.weight": "model-00002.safetensors", "model.layers.23.self_attn.q_proj.weight": "model-00002.safetensors", "model.layers.23.self_attn.k_proj.weight": "model-00002.safetensors", "model.layers.23.self_attn.v_proj.weight": "model-00002.safetensors", "model.layers.23.self_attn.q_proj.bias": "model-00002.safetensors", "model.layers.23.self_attn.k_proj.bias": "model-00002.safetensors", "model.layers.23.self_attn.v_proj.bias": "model-00002.safetensors", "model.layers.23.mlp.gate_proj.weight": "model-00002.safetensors", "model.layers.23.mlp.up_proj.weight": "model-00002.safetensors", "model.layers.24.self_attn.o_proj.weight": "model-00002.safetensors", "model.layers.24.input_layernorm.weight": "model-00002.safetensors", "model.layers.24.mlp.down_proj.weight": "model-00002.safetensors", "model.layers.24.post_attention_layernorm.weight": "model-00002.safetensors", "model.layers.24.self_attn.q_proj.weight": "model-00002.safetensors", "model.layers.24.self_attn.k_proj.weight": "model-00002.safetensors", "model.layers.24.self_attn.v_proj.weight": "model-00002.safetensors", "model.layers.24.self_attn.q_proj.bias": "model-00002.safetensors", "model.layers.24.self_attn.k_proj.bias": "model-00002.safetensors", "model.layers.24.self_attn.v_proj.bias": "model-00002.safetensors", "model.layers.24.mlp.gate_proj.weight": "model-00002.safetensors", "model.layers.24.mlp.up_proj.weight": "model-00002.safetensors", "model.layers.25.self_attn.o_proj.weight": "model-00002.safetensors", "model.layers.25.input_layernorm.weight": "model-00002.safetensors", "model.layers.25.mlp.down_proj.weight": "model-00002.safetensors", "model.layers.25.post_attention_layernorm.weight": "model-00002.safetensors", "model.layers.25.self_attn.q_proj.weight": "model-00002.safetensors", "model.layers.25.self_attn.k_proj.weight": "model-00002.safetensors", "model.layers.25.self_attn.v_proj.weight": "model-00002.safetensors", "model.layers.25.self_attn.q_proj.bias": "model-00002.safetensors", "model.layers.25.self_attn.k_proj.bias": "model-00002.safetensors", "model.layers.25.self_attn.v_proj.bias": "model-00002.safetensors", "model.layers.25.mlp.gate_proj.weight": "model-00002.safetensors", "model.layers.25.mlp.up_proj.weight": "model-00002.safetensors", "model.layers.26.self_attn.o_proj.weight": "model-00002.safetensors", "model.layers.26.input_layernorm.weight": "model-00002.safetensors", "model.layers.26.mlp.down_proj.weight": "model-00002.safetensors", "model.layers.26.post_attention_layernorm.weight": "model-00002.safetensors", "model.layers.26.self_attn.q_proj.weight": "model-00002.safetensors", "model.layers.26.self_attn.k_proj.weight": "model-00002.safetensors", "model.layers.26.self_attn.v_proj.weight": "model-00002.safetensors", "model.layers.26.self_attn.q_proj.bias": "model-00002.safetensors", "model.layers.26.self_attn.k_proj.bias": "model-00002.safetensors", "model.layers.26.self_attn.v_proj.bias": "model-00002.safetensors", "model.layers.26.mlp.gate_proj.weight": "model-00002.safetensors", "model.layers.26.mlp.up_proj.weight": "model-00002.safetensors", "model.layers.27.self_attn.o_proj.weight": "model-00002.safetensors", "model.layers.27.input_layernorm.weight": "model-00002.safetensors", 
"model.layers.27.mlp.down_proj.weight": "model-00002.safetensors", "model.layers.27.post_attention_layernorm.weight": "model-00002.safetensors", "model.layers.27.self_attn.q_proj.weight": "model-00002.safetensors", "model.layers.27.self_attn.k_proj.weight": "model-00002.safetensors", "model.layers.27.self_attn.v_proj.weight": "model-00002.safetensors", "model.layers.27.self_attn.q_proj.bias": "model-00002.safetensors", "model.layers.27.self_attn.k_proj.bias": "model-00002.safetensors", "model.layers.27.self_attn.v_proj.bias": "model-00002.safetensors", "model.layers.27.mlp.gate_proj.weight": "model-00002.safetensors", "model.layers.27.mlp.up_proj.weight": "model-00002.safetensors", "model.norm.weight": "model-00002.safetensors", "lm_head.weight": "model-00002.safetensors"}}
modeling_step_audio_2.py ADDED
@@ -0,0 +1,425 @@
+from typing import Iterable, Optional, Tuple
+
+import librosa
+import torch
+import torch.nn.functional as F
+import torchaudio
+from torch import Tensor, nn
+from transformers import PreTrainedModel, Qwen2Model
+from transformers.generation.utils import GenerationMixin
+from transformers.modeling_outputs import CausalLMOutputWithPast
+
+from .configuration_step_audio_2 import StepAudio2Config
+
+
+def _mel_filters(n_mels: int) -> torch.Tensor:
+    """Load the mel filterbank matrix for projecting the STFT into a Mel spectrogram."""
+    assert n_mels in {80, 128}, f"Unsupported n_mels: {n_mels}"
+    return torch.from_numpy(librosa.filters.mel(sr=16000, n_fft=400, n_mels=n_mels))
+
+
+def load_audio(file_path, target_rate=16000, max_length=None):
+    """
+    Open an audio file and read it as a mono waveform, resampling as necessary.
+    If max_length is provided, truncate the audio to that many samples.
+    """
+    waveform, sample_rate = torchaudio.load(file_path)
+    if sample_rate != target_rate:
+        waveform = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=target_rate)(waveform)
+    audio = waveform[0]  # take the first channel
+
+    # Truncate the audio if it exceeds max_length
+    if max_length is not None and audio.shape[0] > max_length:
+        audio = audio[:max_length]
+
+    return audio
+
+def log_mel_spectrogram(audio, n_mels=128, padding=479, device=None):
+    """
+    Compute the log-Mel spectrogram with the padding StepAudio expects.
+    """
+    if not torch.is_tensor(audio):
+        if isinstance(audio, str):
+            audio = load_audio(audio)  # already returns a tensor
+        else:
+            audio = torch.from_numpy(audio)
+    if device is not None:
+        audio = audio.to(device)
+    if padding > 0:
+        audio = F.pad(audio, (0, padding))
+    window = torch.hann_window(400).to(audio.device)
+    stft = torch.stft(audio, 400, 160, window=window, return_complex=True)
+    magnitudes = stft[..., :-1].abs() ** 2
+    filters = _mel_filters(n_mels).to(magnitudes.device)
+    mel_spec = filters @ magnitudes
+
+    log_spec = torch.clamp(mel_spec, min=1e-10).log10()
+    log_spec = torch.maximum(log_spec, log_spec.max() - 8.0)
+    log_spec = (log_spec + 4.0) / 4.0
+    return log_spec
+
+def compute_token_num(max_feature_len):
+    # The mel features first go through the encoder:
+    # 1. conv1: kernel=3, stride=1, padding=1 -> length unchanged
+    # 2. conv2: kernel=3, stride=2, padding=1 -> length / 2
+    # 3. avg_pooler: kernel=2, stride=2 -> length / 2
+    max_feature_len = max_feature_len - 2  # remove padding
+    encoder_output_dim = (max_feature_len + 1) // 2 // 2  # after conv2 and avg_pooler
+
+    # Then through the adaptor (parameters from the config file):
+    padding = 1
+    kernel_size = 3  # from config: audio_encoder_config.kernel_size
+    stride = 2  # from config: audio_encoder_config.adapter_stride
+    adapter_output_dim = (encoder_output_dim + 2 * padding - kernel_size) // stride + 1
+    return adapter_output_dim
+
+def make_non_pad_mask(lengths: torch.Tensor, max_len: int = 0) -> torch.Tensor:
+    """Make a mask tensor marking the non-padded part of each sequence.
+
+    The sequences in a batch may have different lengths. To enable
+    batched computation, padding is needed to make all sequences the
+    same size. To keep the padded positions from leaking into
+    context-dependent blocks such as attention or convolution,
+    the padding is masked.
+
+    1 for the non-padded part and 0 for the padded part.
+
+    Parameters
+    ----------
+    lengths (torch.Tensor): Batch of lengths (B,).
+
+    Returns:
+    -------
+    torch.Tensor: Mask tensor that is True on the non-padded part (B, max_T).
+
+    Examples:
+        >>> import torch
+        >>> import s3tokenizer
+        >>> lengths = torch.tensor([5, 3, 2])
+        >>> masks = s3tokenizer.make_non_pad_mask(lengths)
+        masks = [[1, 1, 1, 1, 1],
+                 [1, 1, 1, 0, 0],
+                 [1, 1, 0, 0, 0]]
+    """
+    batch_size = lengths.size(0)
+    max_len = max_len if max_len > 0 else lengths.max().item()
+    seq_range = torch.arange(0,
+                             max_len,
+                             dtype=torch.int64,
+                             device=lengths.device)
+    seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len)
+    seq_length_expand = lengths.unsqueeze(-1)
+    mask = seq_range_expand >= seq_length_expand
+    return ~mask
+
+def mask_to_bias(mask: torch.Tensor, dtype: torch.dtype) -> torch.Tensor:
+    """Convert a bool mask to a float attention bias for flash attention.
+
+    Parameters
+    ----------
+    mask (torch.Tensor): Batch of bool masks (B, ?).
+
+    Returns:
+    -------
+    torch.Tensor: Bias tensor with large negative values at padded positions (B, ?).
+
+    Examples:
+        >>> import torch
+        >>> import s3tokenizer
+        >>> lengths = torch.tensor([5, 3, 2])
+        >>> masks = s3tokenizer.make_non_pad_mask(lengths)
+        masks = [[1, 1, 1, 1, 1],
+                 [1, 1, 1, 0, 0],
+                 [1, 1, 0, 0, 0]]
+        >>> new_masks = s3tokenizer.mask_to_bias(masks, torch.float32)
+        new_masks = [[-0.0000e+00, -0.0000e+00, -0.0000e+00, -0.0000e+00, -0.0000e+00],
+                     [-0.0000e+00, -0.0000e+00, -0.0000e+00, -1.0000e+10, -1.0000e+10],
+                     [-0.0000e+00, -0.0000e+00, -1.0000e+10, -1.0000e+10, -1.0000e+10]]
+    """
+    assert mask.dtype == torch.bool
+    assert dtype in [torch.float32, torch.bfloat16, torch.float16]
+    mask = mask.to(dtype)
+    # attention mask bias
+    # NOTE(Mddct): torch.finfo has jit issues, so use a fixed constant instead of
+    # chunk_masks = (1.0 - chunk_masks) * torch.finfo(dtype).min
+    mask = (1.0 - mask) * -1.0e+10
+    return mask
+
+class LayerNorm(nn.LayerNorm):
+    def forward(self, input: Tensor) -> Tensor:
+        return super().forward(input).type(input.dtype)
+
+class Linear(nn.Linear):
+    def forward(self, input: Tensor) -> Tensor:
+        return F.linear(
+            input,
+            self.weight.to(input.dtype),
+            None if self.bias is None else self.bias.to(input.dtype),
+        )
+
+class Conv1d(nn.Conv1d):
+    def _conv_forward(
+        self, input: Tensor, weight: Tensor, bias: Optional[Tensor]
+    ) -> Tensor:
+        return super()._conv_forward(
+            input, weight.to(input.dtype), None if bias is None else bias.to(input.dtype)
+        )
+
+class MultiHeadAttention(nn.Module):
+    def __init__(self, n_state: int, n_head: int):
+        super().__init__()
+        self.n_head = n_head
+        self.query = Linear(n_state, n_state)
+        self.key = Linear(n_state, n_state, bias=False)
+        self.value = Linear(n_state, n_state)
+        self.out = Linear(n_state, n_state)
+
+    def forward(
+        self,
+        x: Tensor,
+        mask: Optional[Tensor] = None,
+    ):
+        q = self.query(x)
+        k = self.key(x)
+        v = self.value(x)
+
+        wv, qk = self.qkv_attention(q, k, v, mask)
+        return self.out(wv), qk
+
+    def qkv_attention(
+        self, q: Tensor, k: Tensor, v: Tensor, mask: Optional[Tensor] = None
+    ):
+        _, T, D = q.shape
+        scale = (D // self.n_head) ** -0.25
+        q = q.view(*q.shape[:2], self.n_head, -1).permute(0, 2, 1, 3) * scale
+        k = k.view(*k.shape[:2], self.n_head, -1).permute(0, 2, 3, 1) * scale
+        v = v.view(*v.shape[:2], self.n_head, -1).permute(0, 2, 1, 3)
+
+        qk = q @ k  # (B, n_head, T, T)
+        if mask is not None:
+            qk = qk + mask
+        qk = qk.float()
+
+        w = F.softmax(qk, dim=-1).to(q.dtype)
+        return (w @ v).permute(0, 2, 1, 3).flatten(start_dim=2), qk.detach()
+
+class ResidualAttentionBlock(nn.Module):
+    def __init__(self, n_state: int, n_head: int):
+        super().__init__()
+
+        self.attn = MultiHeadAttention(n_state, n_head)
+        self.attn_ln = LayerNorm(n_state)
+
+        n_mlp = n_state * 4
+        self.mlp = nn.Sequential(
+            Linear(n_state, n_mlp), nn.GELU(), Linear(n_mlp, n_state)
+        )
+        self.mlp_ln = LayerNorm(n_state)
+
+    def forward(
+        self,
+        x: Tensor,
+        mask: Optional[Tensor] = None,
+    ):
+        x = x + self.attn(self.attn_ln(x.contiguous()), mask=mask)[0]
+        x = x + self.mlp(self.mlp_ln(x.contiguous()))
+        return x
+
+class AudioEncoder(nn.Module):
+    def __init__(
+        self, n_mels: int, n_ctx: int, n_state: int, n_head: int, n_layer: int
+    ):
+        super().__init__()
+        self.conv1 = Conv1d(n_mels, n_state, kernel_size=3, padding=1)
+        self.conv2 = Conv1d(n_state, n_state, kernel_size=3, stride=2, padding=1)
+        self.positional_embedding = nn.Embedding(n_ctx, n_state)
+        self.positional_embedding.requires_grad_(False)
+        self.blocks: Iterable[ResidualAttentionBlock] = nn.ModuleList(
+            [ResidualAttentionBlock(n_state, n_head) for _ in range(n_layer)]
+        )
+        self.avg_pooler = nn.AvgPool1d(2, stride=2)
+        self.after_norm = LayerNorm(n_state)
+        self.gradient_checkpointing = False
+
+    def forward(self, x: Tensor, x_len: Tensor) -> Tuple[Tensor, Tensor]:
+        T = x.size(-1)
+        x = F.gelu(self.conv1(x))
+        x = F.gelu(self.conv2(x))
+        x = x.permute(0, 2, 1)  # (B, T // 2, n_state)
+        mask = make_non_pad_mask(x_len, T).unsqueeze(1)  # (B, 1, T)
+        mask = mask_to_bias(mask[:, :, (T + 1) % 2::2], x.dtype)  # (B, 1, T // 2)
+        x = (x + self.positional_embedding.weight[:x.shape[1], :]).to(x.dtype)
+        for block in self.blocks:
+            if self.gradient_checkpointing and self.training:
+                x = torch.utils.checkpoint.checkpoint(block, x, mask.unsqueeze(1))
+            else:
+                x = block(x, mask.unsqueeze(1))
+        x = x.permute(0, 2, 1)
+        x = self.avg_pooler(x)
+        x = x.permute(0, 2, 1)
+        x_len = (x_len + 1) // 2 // 2
+        x = self.after_norm(x.contiguous())
+        return x, x_len
+
+class Adaptor(nn.Module):
+    def __init__(
+        self,
+        n_state: int = 1280,
+        n_hidden: int = 3072,
+        kernel_size: int = 7,
+        stride: int = 4
+    ):
+        super().__init__()
+        self.stride = stride
+        if self.stride != -1:
+            self.conv = Conv1d(n_state, n_state, kernel_size, stride, padding=1)
+        self.linear1 = nn.Linear(n_state, 2048)
+        self.relu = nn.ReLU()
+        self.linear2 = nn.Linear(2048, n_hidden)
+        self.gradient_checkpointing = False
+
+    def forward(self, x: Tensor) -> Tensor:
+        if self.stride != -1:
+            if self.gradient_checkpointing and self.training:
+                x = torch.utils.checkpoint.checkpoint(self.conv, x.permute(0, 2, 1))
+                # apply the same GELU as the non-checkpointed path
+                x = F.gelu(x).permute(0, 2, 1)
+            else:
+                x = x.permute(0, 2, 1)
+                x = F.gelu(self.conv(x))
+                x = x.permute(0, 2, 1)
+        if self.gradient_checkpointing and self.training:
+            x = torch.utils.checkpoint.checkpoint(self.linear1, x)
+            x = torch.utils.checkpoint.checkpoint(self.relu, x)
+            x = torch.utils.checkpoint.checkpoint(self.linear2, x)
+        else:
+            x = self.linear1(x)
+            x = self.relu(x)
+            x = self.linear2(x)
+        return x
+
+class StepAudio2ForCausalLM(PreTrainedModel, GenerationMixin):
+    config_class = StepAudio2Config
+    main_input_name = "input_ids"
+    # Important: these attributes let HF recognize the model as generation-capable
+    # _keys_to_ignore_on_load_missing = ["lm_head.weight"]
+    supports_gradient_checkpointing = True  # added: declares gradient-checkpointing support
+
+    def __init__(self, config: StepAudio2Config):
+        super().__init__(config)
+        if isinstance(config.torch_dtype, str):
+            dtype = getattr(torch, config.torch_dtype)
+        else:
+            dtype = config.torch_dtype
+        self.model = Qwen2Model(config.text_config)
+        self.bf16 = dtype == torch.bfloat16
+        self.encoder = AudioEncoder(
+            config.audio_encoder_config.n_mels, config.audio_encoder_config.n_audio_ctx, config.audio_encoder_config.n_audio_state,
+            config.audio_encoder_config.n_audio_head, config.audio_encoder_config.n_audio_layer
+        )
+        self.adapter = Adaptor(
+            config.audio_encoder_config.n_audio_state, config.audio_encoder_config.llm_dim,
+            config.audio_encoder_config.kernel_size, config.audio_encoder_config.adapter_stride
+        )
+        if self.bf16:
+            self.encoder = self.encoder.bfloat16()
+            self.adapter = self.adapter.bfloat16()
+        self.lm_head = torch.nn.Linear(
+            config.hidden_size,
+            config.vocab_size,
+            bias=False,
+            dtype=dtype
+        )
+        self.post_init()
+
+    def forward(
+        self,
+        input_ids=None,
+        wavs=None,
+        wav_lens=None,
+        attention_mask=None,
+        **kwargs
+    ):
+        hidden_states = self.model.embed_tokens(input_ids)
+        if wavs is not None:
+            if self.bf16:
+                wavs = wavs.bfloat16()
+            out, feat_lens = self.encoder(wavs, wav_lens)
+            out = self.adapter(out)
+            feat_lens = (feat_lens - 1) // 2 + 1
+            # 151688 is the id of the audio placeholder token; the adapter output
+            # overwrites the embeddings at the positions that follow it.
+            insert_location = torch.nonzero(input_ids == 151688)
+            insert_location[:, 1] += 1
+            for idx in range(len(insert_location)):
+                i, s = insert_location[idx]
+                hidden_states[i][s : s + feat_lens[idx]] = out[idx][:feat_lens[idx]]
+
+        x = self.model(inputs_embeds=hidden_states, attention_mask=attention_mask)[0]
+        logits = self.lm_head(x)
+        return CausalLMOutputWithPast(
+            logits=logits,
+            past_key_values=None,
+            hidden_states=None,
+            attentions=None
+        )
+
+    def get_input_embeddings(self):
+        """Return the model's input embeddings - required for GenerationMixin."""
+        return self.model.embed_tokens
+
+    def get_output_embeddings(self):
+        """Return the model's output embeddings (LM head) - required for GenerationMixin."""
+        return self.lm_head
+
+    def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **kwargs):
+        """Prepare inputs for generation - required for GenerationMixin."""
+        # Keep the wavs and wav_lens from the initial call
+        wavs = kwargs.get("wavs", None)
+        wav_lens = kwargs.get("wav_lens", None)
+
+        # For generation steps after the first, the audio does not need to be
+        # processed again, because the audio features have already been inserted
+        # into the input sequence.
+        if "past_key_values" in kwargs and kwargs["past_key_values"] is not None:
+            # We're in a later generation step; no need to process audio again
+            return {
+                "input_ids": input_ids,
+                "attention_mask": attention_mask,
+                "past_key_values": kwargs.get("past_key_values")
+            }
+
+        # First generation step: include the audio inputs
+        return {
+            "input_ids": input_ids,
+            "attention_mask": attention_mask,
+            "wavs": wavs,
+            "wav_lens": wav_lens
+        }
+
+    def _reorder_cache(self, past_key_values, beam_idx):
+        """Reorder the cache for beam search - required by GenerationMixin when beam search is used."""
+        # This model does not return past_key_values, so a pass-through suffices;
+        # otherwise this would need to follow the model's cache structure.
+        return past_key_values
+
+    def _set_gradient_checkpointing(self, module, value=False):
+        # For Qwen2Model
+        if hasattr(self.model, 'gradient_checkpointing'):
+            self.model.gradient_checkpointing = value
+
+        # Add the missing _gradient_checkpointing_func to Qwen2Model.
+        # This is what Qwen2Model tries to call when gradient_checkpointing=True.
+        if value and not hasattr(self.model, '_gradient_checkpointing_func'):
+            def _gradient_checkpointing_func(module_to_run, *args, **kwargs):
+                # Wraps torch.utils.checkpoint.checkpoint, which Qwen2Model
+                # uses to perform checkpointing
+                return torch.utils.checkpoint.checkpoint(module_to_run, *args, **kwargs)
+
+            self.model._gradient_checkpointing_func = _gradient_checkpointing_func
+
+        # For the custom encoder and adapter
+        if hasattr(self.encoder, 'gradient_checkpointing'):
+            self.encoder.gradient_checkpointing = value
+        if hasattr(self.adapter, 'gradient_checkpointing'):
+            self.adapter.gradient_checkpointing = value
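For reference, a minimal inference sketch against the class above. Everything outside this diff is an assumption: the local path, the wav file, the use of <audio_patch_pad> to reserve the audio positions, and the bare prompt layout (the real chat template ships with the tokenizer files). The only contract taken from the code itself is that forward() writes the adapter output right after the token with id 151688 and expects wavs as (B, 128, T) log-mel features:

import sys

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_DIR = "./step-audio-2-local"  # hypothetical local checkout of this repo

tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(MODEL_DIR, trust_remote_code=True).eval()

# The dynamically loaded module ships the feature helpers defined above.
helpers = sys.modules[type(model).__module__]
mel = helpers.log_mel_spectrogram("question.wav")   # (128, T) log-mel features
n_audio = helpers.compute_token_num(mel.shape[1])   # LLM positions the audio fills

audio_mark = 151688                                  # insertion anchor used by forward()
pad_id = tokenizer.convert_tokens_to_ids("<audio_patch_pad>")  # assumed filler token
input_ids = torch.tensor([[audio_mark] + [pad_id] * n_audio])

out = model.generate(
    input_ids,
    wavs=mel.unsqueeze(0),                           # (1, 128, T)
    wav_lens=torch.tensor([mel.shape[1]]),
    max_new_tokens=64,
)
print(tokenizer.decode(out[0][input_ids.shape[1]:], skip_special_tokens=True))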
special_tokens_map.json ADDED
@@ -0,0 +1,235 @@
+{
+  "additional_special_tokens": [
+    {
+      "content": "<|EOT|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    },
+    {
+      "content": "<|BOT|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    },
+    {
+      "content": "<|CALL_START|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    },
+    {
+      "content": "<|CALL_END|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    },
+    {
+      "content": "<|THINK_START|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    },
+    {
+      "content": "<|THINK_END|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    },
+    {
+      "content": "<|IMG_START|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    },
+    {
+      "content": "<|IMG_END|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    },
+    {
+      "content": "<|META_START|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    },
+    {
+      "content": "<|META_END|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    },
+    {
+      "content": "<im_patch>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    },
+    {
+      "content": "<im_start>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    },
+    {
+      "content": "<im_end>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    },
+    {
+      "content": "<dream>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    },
+    {
+      "content": "<dream_start>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    },
+    {
+      "content": "<dream_end>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    },
+    {
+      "content": "<|MASK_1e69f|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    },
+    {
+      "content": "<|UNMASK_1e69f|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    },
+    {
+      "content": "<video_start>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    },
+    {
+      "content": "<video_end>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    },
+    {
+      "content": "<patch_start>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    },
+    {
+      "content": "<patch_end>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    },
+    {
+      "content": "<patch_newline>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    },
+    {
+      "content": "<audio_start>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    },
+    {
+      "content": "<audio_end>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    },
+    {
+      "content": "<audio_patch>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    },
+    {
+      "content": "<audio_patch_pad>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    },
+    {
+      "content": "<|SC|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    },
+    {
+      "content": "<tts_start>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    },
+    {
+      "content": "<tts_end>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    },
+    {
+      "content": "<tts_pad>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    }
+  ],
+  "eos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
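A quick way to check how these entries resolve once the tokenizer is loaded; a sketch assuming a local checkout of this repo:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./step-audio-2-local", trust_remote_code=True)  # hypothetical path

for t in ("<audio_start>", "<audio_patch>", "<audio_patch_pad>", "<tts_start>", "<tts_pad>"):
    print(t, tok.convert_tokens_to_ids(t))

# eos and pad share one token here, so both ids should match the eos_token_id in config.json
print(tok.eos_token, tok.eos_token_id, tok.pad_token_id)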
token2wav/campplus.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a6ac6a63997761ae2997373e2ee1c47040854b4b759ea41ec48e4e42df0f4d73
+size 28303423
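The ADDED body for each binary asset here is a Git LFS pointer, not the file itself; after downloading the real blob, the oid line doubles as a checksum. A small verification sketch using only the standard library:

import hashlib

def sha256_of(path, chunk=1 << 20):
    # Stream the file so large blobs do not need to fit in memory
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk):
            h.update(block)
    return h.hexdigest()

assert sha256_of("token2wav/campplus.onnx") == \
    "a6ac6a63997761ae2997373e2ee1c47040854b4b759ea41ec48e4e42df0f4d73"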
token2wav/flow.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:15ccff24256ff61537c7f8b51e025116b83405f3fb017b54b008fc97da115446
+size 623466603
token2wav/flow.yaml ADDED
@@ -0,0 +1,34 @@
+flow: !new:cosyvoice2.flow.flow.CausalMaskedDiffWithXvec
+  input_size: 512
+  output_size: 80
+  spk_embed_dim: 192
+  output_type: 'mel'
+  vocab_size: 6561
+  encoder: !new:cosyvoice2.transformer.upsample_encoder_v2.UpsampleConformerEncoderV2
+    input_size: 512
+    output_size: 512
+    input_layer: 'linear'
+    pre_lookahead_len: 3
+    num_blocks: 6
+    num_up_blocks: 4
+    up_stride: 2
+    up_scale_factor: 2
+    attention_heads: 8
+    pos_enc_layer_type: 'rel_pos_espnet'
+    selfattention_layer_type: 'rel_selfattn'
+    key_bias: true
+    linear_units: 2048
+    dropout_rate: 0.1
+    positional_dropout_rate: 0.1
+    attention_dropout_rate: 0.1
+    normalize_before: True
+  decoder: !new:cosyvoice2.flow.flow_matching.CausalConditionalCFM
+    inference_cfg_rate: 0.7
+    estimator: !new:cosyvoice2.flow.decoder_dit.DiT
+      in_channels: 320
+      out_channels: 80
+      mlp_ratio: 4.0
+      depth: 16
+      num_heads: 8
+      head_dim: 64
+      hidden_size: 512
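The !new: tags are HyperPyYAML constructor tags, so this config both describes and instantiates the CosyVoice-style flow module. A heavily hedged loading sketch, assuming the cosyvoice2 package the tags reference is importable and that flow.pt holds a plain state_dict:

import torch
from hyperpyyaml import load_hyperpyyaml

# Requires the cosyvoice2 package referenced by the !new: tags above.
with open("token2wav/flow.yaml") as f:
    cfg = load_hyperpyyaml(f)

flow = cfg["flow"]
# Assumption: flow.pt stores the module's state_dict.
flow.load_state_dict(torch.load("token2wav/flow.pt", map_location="cpu"))
flow.eval()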
token2wav/hift.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3386cc880324d4e98e05987b99107f49e40ed925b8ecc87c1f4939432d429879
+size 83390254
token2wav/speech_tokenizer_v2_25hz.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d43342aa12163a80bf07bffb94c9de2e120a8df2f9917cd2f642e7f4219c6f71
+size 496082973
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d48dcfbb2cf303621b5746db35e824e8bfe9777b1f271f827f6659759c1fe0cc
+size 8294393
tokenizer_config.json ADDED
The diff for this file is too large to render. See raw diff
 
vocab.json ADDED
The diff for this file is too large to render. See raw diff