bu1 committed · verified
Commit 3be7fe8 · Parent(s): b292063

Upload model

Files changed (3):
  1. config.json +3 -1
  2. model.safetensors +1 -1
  3. modeling_IQtransformer.py +4 -2
config.json CHANGED
@@ -11,7 +11,9 @@
   "ffn_num_input": 32,
   "key_size": 32,
   "model_type": "IQsignal_transformer",
-  "norm_shape": 32,
+  "norm_shape": [
+    32
+  ],
   "num_heads": 4,
   "num_hiddens": 32,
   "num_layers": 2,
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f80269a38e99e9ac23a319b1a548a35ecd51ddb66aadc3b05b5ea85a32179498
+oid sha256:f0086aa0b6c4b67c2d818b2038c0232b0c8d86445867f1d73489eb2e5f4bf41d
 size 79108
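
The pointer file above follows the git-lfs v1 spec: oid is the SHA-256 digest of the actual file contents and size is its byte length, so an unchanged size with a new oid means the weights were replaced by a different file of the same size. A hedged sketch for verifying a downloaded copy (the local path is an assumption):

import hashlib

# Per the git-lfs v1 pointer format, oid = sha256(file contents).
EXPECTED_OID = "f0086aa0b6c4b67c2d818b2038c0232b0c8d86445867f1d73489eb2e5f4bf41d"
EXPECTED_SIZE = 79108

with open("model.safetensors", "rb") as f:  # assumed local path
    data = f.read()

assert len(data) == EXPECTED_SIZE
assert hashlib.sha256(data).hexdigest() == EXPECTED_OID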
modeling_IQtransformer.py CHANGED
@@ -5,6 +5,8 @@ import math
 
 from transformers import PretrainedConfig
 
+# Keep transformerConfig and transformerModel in the same file, to avoid errors caused by a class mismatch
+
 class transformerConfig(PretrainedConfig):
     model_type = "IQsignal_transformer"
 
@@ -15,7 +17,7 @@ class transformerConfig(PretrainedConfig):
         query_size : int = 32,
         value_size : int = 32,
         num_hiddens : int = 32,
-        norm_shape : int = 32,
+        norm_shape : int = [32],
         ffn_num_input : int = 32,
         ffn_num_hiddens : int = 64,
         num_heads : int = 4,
@@ -211,7 +213,7 @@ class transformerModel(PreTrainedModel):
         self.Linear = nn.Linear(config.vocab_size, config.vocab_size)
         # self.embedding = nn.Embedding(vocab_size, num_hiddens)  # project the vocab_size input dimension to the desired num_hiddens dimension
         # self.pos_encoding = d2l.PositionalEncoding(num_hiddens, dropout)
-        self.ln = nn.LayerNorm(config.vocab_size)
+        self.ln = nn.LayerNorm(config.norm_shape)
         self.blks = nn.Sequential()
         for i in range(config.num_layers):
             self.blks.add_module("block" + str(i),
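
Since the commit's stated goal is keeping transformerConfig and transformerModel in one file so the dynamically loaded config class matches the one the model was built against, loading would typically go through the Hub's remote-code path. A sketch under that assumption (the repo id is hypothetical, inferred from the committer name):

from transformers import AutoConfig, AutoModel

REPO_ID = "bu1/IQtransformer"  # hypothetical repo id, for illustration only

# trust_remote_code pulls modeling_IQtransformer.py from the repo; with both
# classes in that single file, the config object handed to transformerModel
# is an instance of the same transformerConfig class it expects.
config = AutoConfig.from_pretrained(REPO_ID, trust_remote_code=True)
model = AutoModel.from_pretrained(REPO_ID, trust_remote_code=True)

print(config.norm_shape)  # [32] after this commit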