bu1 committed (verified)
Commit 0024a9c · 1 parent: 5f63f06

Upload model

Files changed (2)
  1. config.json +4 -0
  2. modeling_transformer.py +414 -0
config.json CHANGED
@@ -2,6 +2,10 @@
   "architectures": [
     "transformerModel"
   ],
+  "auto_map": {
+    "AutoConfig": "modeling_transformer.transformerConfig",
+    "AutoModelForCausalLM": "modeling_transformer.transformerModel"
+  },
   "batch_size": 64,
   "dropout": 0.1,
   "ffn_num_hiddens": 64,
modeling_transformer.py ADDED
@@ -0,0 +1,414 @@
from transformers import PreTrainedModel, PretrainedConfig

import math
import torch
from torch import nn


#@save
class PositionWiseFFN(nn.Module):
    """Position-wise feed-forward network."""
    def __init__(self, ffn_num_input, ffn_num_hiddens, ffn_num_outputs,
                 **kwargs):
        super(PositionWiseFFN, self).__init__(**kwargs)
        self.dense1 = nn.Linear(ffn_num_input, ffn_num_hiddens)
        self.relu = nn.ReLU()
        self.dense2 = nn.Linear(ffn_num_hiddens, ffn_num_outputs)

    def forward(self, X):
        # Applied independently at every position:
        # (batch_size, seq_len, ffn_num_input) -> (batch_size, seq_len, ffn_num_outputs)
        return self.dense2(self.relu(self.dense1(X)))

def transpose_qkv(X, num_heads):
    """Reshape for parallel computation of multiple attention heads.

    Defined in :numref:`sec_multihead-attention`"""
    # Shape of input X: (batch_size, no. of queries or key-value pairs,
    # num_hiddens)
    # Shape of output X: (batch_size, no. of queries or key-value pairs,
    # num_heads, num_hiddens/num_heads)
    X = X.reshape(X.shape[0], X.shape[1], num_heads, -1)

    # Shape of output X: (batch_size, num_heads, no. of queries or key-value
    # pairs, num_hiddens/num_heads)
    X = X.permute(0, 2, 1, 3)

    # Shape of final output: (batch_size*num_heads, no. of queries or
    # key-value pairs, num_hiddens/num_heads)
    return X.reshape(-1, X.shape[2], X.shape[3])

def transpose_output(X, num_heads):
    """Reverse the operation of transpose_qkv.

    Defined in :numref:`sec_multihead-attention`"""
    X = X.reshape(-1, num_heads, X.shape[1], X.shape[2])
    X = X.permute(0, 2, 1, 3)
    return X.reshape(X.shape[0], X.shape[1], -1)

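# Shape round-trip (illustrative values, assuming batch_size=2, 5 queries,
# num_hiddens=32, num_heads=4):
#   transpose_qkv:    (2, 5, 32) -> (8, 5, 8)   # 8 = 2*num_heads, 8 = 32/num_heads
#   transpose_output: (8, 5, 8)  -> (2, 5, 32)
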
def sequence_mask(X, valid_len, value=0):
    """Mask irrelevant entries in sequences.

    Defined in :numref:`sec_seq2seq_decoder`"""
    maxlen = X.size(1)
    mask = torch.arange((maxlen), dtype=torch.float32,
                        device=X.device)[None, :] < valid_len[:, None]
    X[~mask] = value
    return X

def masked_softmax(X, valid_lens):
    """Perform softmax by masking elements along the last axis.

    Defined in :numref:`sec_attention-scoring-functions`"""
    # X: 3D tensor, valid_lens: 1D or 2D tensor
    if valid_lens is None:
        return nn.functional.softmax(X, dim=-1)
    else:
        shape = X.shape
        if valid_lens.dim() == 1:
            valid_lens = torch.repeat_interleave(valid_lens, shape[1])
        else:
            valid_lens = valid_lens.reshape(-1)
        # Masked elements on the last axis are replaced with a very large
        # negative value, so that their softmax output is 0
        X = sequence_mask(X.reshape(-1, shape[-1]), valid_lens,
                          value=-1e6)
        return nn.functional.softmax(X.reshape(shape), dim=-1)

class DotProductAttention(nn.Module):
    """Scaled dot-product attention.

    Defined in :numref:`subsec_additive-attention`"""
    def __init__(self, dropout, **kwargs):
        super(DotProductAttention, self).__init__(**kwargs)
        self.dropout = nn.Dropout(dropout)

    # Shape of queries: (batch_size, no. of queries, d)
    # Shape of keys: (batch_size, no. of key-value pairs, d)
    # Shape of values: (batch_size, no. of key-value pairs, value dimension)
    # Shape of valid_lens: (batch_size,) or (batch_size, no. of queries)
    def forward(self, queries, keys, values, valid_lens=None):
        d = queries.shape[-1]
        # Swap the last two dimensions of keys before the batched matmul
        scores = torch.bmm(queries, keys.transpose(1, 2)) / math.sqrt(d)
        self.attention_weights = masked_softmax(scores, valid_lens)
        return torch.bmm(self.dropout(self.attention_weights), values)

class MultiHeadAttention(nn.Module):
    """Multi-head attention.

    Defined in :numref:`sec_multihead-attention`"""
    def __init__(self, key_size, query_size, value_size, num_hiddens,
                 num_heads, dropout, bias=False, **kwargs):
        super(MultiHeadAttention, self).__init__(**kwargs)
        self.num_heads = num_heads
        self.attention = DotProductAttention(dropout)
        self.W_q = nn.Linear(query_size, num_hiddens, bias=bias)
        self.W_k = nn.Linear(key_size, num_hiddens, bias=bias)
        self.W_v = nn.Linear(value_size, num_hiddens, bias=bias)
        self.W_o = nn.Linear(num_hiddens, num_hiddens, bias=bias)

    def forward(self, queries, keys, values, valid_lens):
        # Shape of queries, keys, values:
        # (batch_size, no. of queries or key-value pairs, num_hiddens)
        # Shape of valid_lens:
        # (batch_size,) or (batch_size, no. of queries)
        # After transposing, shape of output queries, keys, values:
        # (batch_size*num_heads, no. of queries or key-value pairs,
        # num_hiddens/num_heads)
        queries = transpose_qkv(self.W_q(queries), self.num_heads)
        keys = transpose_qkv(self.W_k(keys), self.num_heads)
        values = transpose_qkv(self.W_v(values), self.num_heads)

        if valid_lens is not None:
            # On axis 0, copy the first item (scalar or vector) num_heads
            # times, then copy the next item, and so on
            valid_lens = torch.repeat_interleave(
                valid_lens, repeats=self.num_heads, dim=0)

        # Shape of output: (batch_size*num_heads, no. of queries,
        # num_hiddens/num_heads)
        output = self.attention(queries, keys, values, valid_lens)

        # Shape of output_concat: (batch_size, no. of queries, num_hiddens)
        output_concat = transpose_output(output, self.num_heads)
        return self.W_o(output_concat)


# Residual connection followed by layer normalization
#@save
class AddNorm(nn.Module):
    """Residual connection followed by layer normalization."""
    def __init__(self, normalized_shape, dropout, **kwargs):
        super(AddNorm, self).__init__(**kwargs)
        self.dropout = nn.Dropout(dropout)
        self.ln = nn.LayerNorm(normalized_shape)

    def forward(self, X, Y):
        return self.ln(self.dropout(Y) + X)

# Encoder: no layer in the Transformer encoder changes the shape of its input
#@save
class EncoderBlock(nn.Module):
    """Transformer encoder block."""
    def __init__(self, key_size, query_size, value_size, num_hiddens,
                 norm_shape, ffn_num_input, ffn_num_hiddens, num_heads,
                 dropout, use_bias=False, **kwargs):
        super(EncoderBlock, self).__init__(**kwargs)
        self.attention = MultiHeadAttention(
            key_size, query_size, value_size, num_hiddens, num_heads, dropout,
            use_bias)
        self.addnorm1 = AddNorm(norm_shape, dropout)
        self.ffn = PositionWiseFFN(
            ffn_num_input, ffn_num_hiddens, num_hiddens)
        self.addnorm2 = AddNorm(norm_shape, dropout)

    def forward(self, X, valid_lens):
        Y = self.addnorm1(X, self.attention(X, X, X, valid_lens))
        return self.addnorm2(Y, self.ffn(Y))

class PositionalEncoding(nn.Module):
    """Positional encoding.

    Defined in :numref:`sec_self-attention-and-positional-encoding`"""
    def __init__(self, num_hiddens, dropout, max_len=1000):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(dropout)
        # Create a long enough P
        self.P = torch.zeros((1, max_len, num_hiddens))
        X = torch.arange(max_len, dtype=torch.float32).reshape(
            -1, 1) / torch.pow(10000, torch.arange(
            0, num_hiddens, 2, dtype=torch.float32) / num_hiddens)
        self.P[:, :, 0::2] = torch.sin(X)
        self.P[:, :, 1::2] = torch.cos(X)

    def forward(self, X):
        X = X + self.P[:, :X.shape[1], :].to(X.device)
        return self.dropout(X)

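# The buffer above implements the standard sinusoidal scheme:
#   P[0, i, 2j]   = sin(i / 10000^(2j / num_hiddens))
#   P[0, i, 2j+1] = cos(i / 10000^(2j / num_hiddens))
# so every position i gets a distinct pattern across the num_hiddens channels.
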
class Encoder(nn.Module):
    """Base encoder interface for the encoder-decoder architecture."""
    def __init__(self, **kwargs):
        super(Encoder, self).__init__(**kwargs)

    def forward(self, X, *args):
        raise NotImplementedError


### Decoder
class DecoderBlock(nn.Module):
    """The i-th block in the decoder."""
    def __init__(self, key_size, query_size, value_size, num_hiddens,
                 norm_shape, ffn_num_input, ffn_num_hiddens, num_heads,
                 dropout, i, **kwargs):
        super(DecoderBlock, self).__init__(**kwargs)
        self.i = i
        self.attention1 = MultiHeadAttention(
            key_size, query_size, value_size, num_hiddens, num_heads, dropout)
        self.addnorm1 = AddNorm(norm_shape, dropout)
        self.attention2 = MultiHeadAttention(
            key_size, query_size, value_size, num_hiddens, num_heads, dropout)
        self.addnorm2 = AddNorm(norm_shape, dropout)
        self.ffn = PositionWiseFFN(ffn_num_input, ffn_num_hiddens,
                                   num_hiddens)
        self.addnorm3 = AddNorm(norm_shape, dropout)

    def forward(self, X, state):
        enc_outputs, enc_valid_lens = state[0], state[1]
        # During training, all tokens of the output sequence are processed at
        # the same time, so state[2][self.i] is initialized as None.
        # During prediction, the output sequence is decoded token by token, so
        # state[2][self.i] holds the representations decoded by the i-th block
        # up to the current time step
        if state[2][self.i] is None:
            key_values = X
        else:
            key_values = torch.cat((state[2][self.i], X), dim=1)
        state[2][self.i] = key_values
        if self.training:
            batch_size, num_steps, _ = X.shape
            # Shape of dec_valid_lens: (batch_size, num_steps), where every
            # row is [1, 2, ..., num_steps]
            dec_valid_lens = torch.arange(
                1, num_steps + 1, device=X.device).repeat(batch_size, 1)
        else:
            dec_valid_lens = None

        # Self-attention
        X2 = self.attention1(X, key_values, key_values, dec_valid_lens)
        Y = self.addnorm1(X, X2)
        # Encoder-decoder attention.
        # Shape of enc_outputs: (batch_size, num_steps, num_hiddens)
        Y2 = self.attention2(Y, enc_outputs, enc_outputs, enc_valid_lens)
        Z = self.addnorm2(Y, Y2)
        return self.addnorm3(Z, self.ffn(Z)), state

class Decoder(nn.Module):
    """Base decoder interface for the encoder-decoder architecture.

    Defined in :numref:`sec_encoder-decoder`"""
    def __init__(self, **kwargs):
        super(Decoder, self).__init__(**kwargs)

    def init_state(self, enc_outputs, *args):
        raise NotImplementedError

    def forward(self, X, state):
        raise NotImplementedError

class AttentionDecoder(Decoder):
    """Base interface for decoders with an attention mechanism.

    Defined in :numref:`sec_seq2seq_attention`"""
    def __init__(self, **kwargs):
        super(AttentionDecoder, self).__init__(**kwargs)

    @property
    def attention_weights(self):
        raise NotImplementedError


#@save
class TransformerEncoder(Encoder):
    """Transformer encoder."""
    def __init__(self, vocab_size, key_size, query_size, value_size,
                 num_hiddens, norm_shape, ffn_num_input, ffn_num_hiddens,
                 num_heads, num_layers, dropout, use_bias=False, **kwargs):
        super(TransformerEncoder, self).__init__(**kwargs)
        self.num_hiddens = num_hiddens
        self.embedding = nn.Embedding(vocab_size, num_hiddens)
        self.pos_encoding = PositionalEncoding(num_hiddens, dropout)
        self.blks = nn.Sequential()
        for i in range(num_layers):
            self.blks.add_module("block"+str(i),
                EncoderBlock(key_size, query_size, value_size, num_hiddens,
                             norm_shape, ffn_num_input, ffn_num_hiddens,
                             num_heads, dropout, use_bias))

    def forward(self, X, valid_lens, *args):
        # Because positional encoding values are between -1 and 1, the
        # embeddings are scaled by the square root of the embedding dimension
        # before being added to the positional encoding.
        X = self.pos_encoding(self.embedding(X) * math.sqrt(self.num_hiddens))
        self.attention_weights = [None] * len(self.blks)
        for i, blk in enumerate(self.blks):
            X = blk(X, valid_lens)
            self.attention_weights[
                i] = blk.attention.attention.attention_weights
        return X


class TransformerDecoder(AttentionDecoder):
    """Transformer decoder."""
    def __init__(self, vocab_size, key_size, query_size, value_size,
                 num_hiddens, norm_shape, ffn_num_input, ffn_num_hiddens,
                 num_heads, num_layers, dropout, **kwargs):
        super(TransformerDecoder, self).__init__(**kwargs)
        self.num_hiddens = num_hiddens
        self.num_layers = num_layers
        self.embedding = nn.Embedding(vocab_size, num_hiddens)
        self.pos_encoding = PositionalEncoding(num_hiddens, dropout)
        self.blks = nn.Sequential()
        for i in range(num_layers):
            self.blks.add_module("block"+str(i),
                DecoderBlock(key_size, query_size, value_size, num_hiddens,
                             norm_shape, ffn_num_input, ffn_num_hiddens,
                             num_heads, dropout, i))
        self.dense = nn.Linear(num_hiddens, vocab_size)

    def init_state(self, enc_outputs, enc_valid_lens, *args):
        return [enc_outputs, enc_valid_lens, [None] * self.num_layers]

    def forward(self, X, state):
        X = self.pos_encoding(self.embedding(X) * math.sqrt(self.num_hiddens))
        self._attention_weights = [[None] * len(self.blks) for _ in range(2)]
        for i, blk in enumerate(self.blks):
            X, state = blk(X, state)
            # Decoder self-attention weights
            self._attention_weights[0][
                i] = blk.attention1.attention.attention_weights
            # Encoder-decoder attention weights
            self._attention_weights[1][
                i] = blk.attention2.attention.attention_weights
        return self.dense(X), state

    @property
    def attention_weights(self):
        return self._attention_weights

class transformerConfig(PretrainedConfig):
    model_type = "custom_transformer"

    def __init__(
        self,
        src_vocab_len: int = 184,
        tgt_vocab: int = 201,
        num_hiddens: int = 32,
        num_layers: int = 2,
        dropout: float = 0.1,
        batch_size: int = 64,
        num_steps: int = 10,
        lr: float = 0.005,
        num_epochs: int = 200,
        ffn_num_input: int = 32,
        ffn_num_hiddens: int = 64,
        num_heads: int = 4,
        key_size: int = 32,
        query_size: int = 32,
        value_size: int = 32,
        norm_shape: list = [32],
        **kwargs,
    ):
        self.src_vocab_len = src_vocab_len
        self.tgt_vocab = tgt_vocab
        self.num_hiddens = num_hiddens
        self.num_layers = num_layers
        self.dropout = dropout
        self.batch_size = batch_size
        self.num_steps = num_steps
        self.lr = lr
        self.num_epochs = num_epochs
        self.ffn_num_input = ffn_num_input
        self.ffn_num_hiddens = ffn_num_hiddens
        self.num_heads = num_heads
        self.key_size = key_size
        self.query_size = query_size
        self.value_size = value_size
        self.norm_shape = norm_shape

        super().__init__(**kwargs)

class transformerModel(PreTrainedModel):

    config_class = transformerConfig

    def __init__(self, config):
        super().__init__(config)
        self.encoder = TransformerEncoder(
            config.src_vocab_len, config.key_size, config.query_size,
            config.value_size, config.num_hiddens, config.norm_shape,
            config.ffn_num_input, config.ffn_num_hiddens, config.num_heads,
            config.num_layers, config.dropout)

        self.decoder = TransformerDecoder(
            config.tgt_vocab, config.key_size, config.query_size,
            config.value_size, config.num_hiddens, config.norm_shape,
            config.ffn_num_input, config.ffn_num_hiddens, config.num_heads,
            config.num_layers, config.dropout)

    def forward(self, enc_X, dec_X, *args):
        enc_outputs = self.encoder(enc_X, *args)
        dec_state = self.decoder.init_state(enc_outputs, *args)
        return self.decoder(dec_X, dec_state)
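A quick local sanity check for the uploaded code is to instantiate the model from the default config and run a dummy forward pass (a sketch; the batch size, random token ids and valid lengths are made-up values, and it assumes the script runs next to modeling_transformer.py):

import torch
from modeling_transformer import transformerConfig, transformerModel

config = transformerConfig()      # defaults: num_hiddens=32, num_steps=10, tgt_vocab=201, ...
model = transformerModel(config)

batch_size = 2                    # assumed dummy batch
enc_X = torch.randint(0, config.src_vocab_len, (batch_size, config.num_steps))
dec_X = torch.randint(0, config.tgt_vocab, (batch_size, config.num_steps))
valid_lens = torch.tensor([config.num_steps, config.num_steps - 2])  # per-sequence source lengths

logits, state = model(enc_X, dec_X, valid_lens)
print(logits.shape)               # expected: (2, 10, 201) = (batch_size, num_steps, tgt_vocab)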