jer233 committed on
Commit 2172586 · verified · 1 Parent(s): 377127e

Create meta_train.py

Files changed (1)
  1. meta_train.py +177 -0
meta_train.py ADDED
@@ -0,0 +1,177 @@
import torch
from torch import nn
from collections import namedtuple
import math
from utils import DEVICE
from pytorch_transformers.modeling_bert import (
    BertEncoder,
    BertPreTrainedModel,
    BertConfig,
)


class GeLU(nn.Module):
    """Implementation of the gelu activation function.

    For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
    0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    Also see https://arxiv.org/abs/1606.08415
    """

    def __init__(self):
        super().__init__()

    def forward(self, x):
        return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))


class BertLayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-12):
        """Construct a layernorm module in the TF style (epsilon inside the square root)."""
        super(BertLayerNorm, self).__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.bias = nn.Parameter(torch.zeros(hidden_size))
        self.variance_epsilon = eps

    def forward(self, x):
        u = x.mean(-1, keepdim=True)
        s = (x - u).pow(2).mean(-1, keepdim=True)
        x = (x - u) / torch.sqrt(s + self.variance_epsilon)
        return self.weight * x + self.bias


class mlp_meta(nn.Module):
    """A residual-free MLP block (Linear -> GeLU -> LayerNorm -> Dropout) that preserves hid_dim."""

    def __init__(self, config):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(config.hid_dim, config.hid_dim),
            GeLU(),
            BertLayerNorm(config.hid_dim, eps=1e-12),
            nn.Dropout(config.dropout),
        )

    def forward(self, x):
        return self.mlp(x)


class Bert_Transformer_Layer(BertPreTrainedModel):
    """A thin wrapper around BertEncoder used as a self-attention fusion layer."""

    def __init__(self, fusion_config):
        super().__init__(BertConfig(**fusion_config))
        bertconfig_fusion = BertConfig(**fusion_config)
        self.encoder = BertEncoder(bertconfig_fusion)
        self.init_weights()

    def forward(self, input, mask=None):
        """
        input: (batch, feats, dim)
        """
        batch, feats, dim = input.size()
        if mask is not None:
            # Expand the per-feature mask into a (batch, 1, feats, feats) attention mask.
            mask_ = torch.ones(size=(batch, feats), device=mask.device)
            mask_[:, 1:] = mask
            mask_ = torch.bmm(
                mask_.view(batch, 1, -1).transpose(1, 2), mask_.view(batch, 1, -1)
            )
            mask_ = mask_.unsqueeze(1)
        else:
            # No mask given: attend over all positions.
            mask = torch.Tensor([1.0]).to(input.device)
            mask_ = mask.repeat(batch, 1, feats, feats)

        # Masked positions receive a large negative bias before the softmax.
        extend_mask = (1 - mask_) * -10000
        assert not extend_mask.requires_grad
        head_mask = [None] * self.config.num_hidden_layers

        enc_output = self.encoder(input, extend_mask, head_mask=head_mask)
        output = enc_output[0]
        all_attention = enc_output[1]

        return output, all_attention


class mmdPreModel(nn.Module):
    def __init__(
        self,
        config,
        num_mlp=0,
        transformer_flag=False,
        num_hidden_layers=1,
        mlp_flag=True,
    ):
        super(mmdPreModel, self).__init__()
        self.num_mlp = num_mlp
        self.transformer_flag = transformer_flag
        self.mlp_flag = mlp_flag
        token_num = config.token_num
        self.mlp = nn.Sequential(
            nn.Linear(config.in_dim, config.hid_dim),
            GeLU(),
            BertLayerNorm(config.hid_dim, eps=1e-12),
            nn.Dropout(config.dropout),
            # nn.Linear(config.hid_dim, config.out_dim),
        )
        self.fusion_config = {
            "hidden_size": config.in_dim,
            "num_hidden_layers": num_hidden_layers,
            "num_attention_heads": 4,
            "output_attentions": True,
        }
        if self.num_mlp > 0:
            self.mlp2 = nn.ModuleList([mlp_meta(config) for _ in range(self.num_mlp)])
        if self.transformer_flag:
            self.transformer = Bert_Transformer_Layer(self.fusion_config)
        self.feature = nn.Linear(config.hid_dim * token_num, config.out_dim)

    def forward(self, features):
        """
        input: [batch, token_num, in_dim], output: [batch, out_dim]
        """
        if self.transformer_flag:
            features, _ = self.transformer(features)
        if self.mlp_flag:
            features = self.mlp(features)

        if self.num_mlp > 0:
            # features = self.mlp2(features)
            for _ in range(1):
                for mlp in self.mlp2:
                    features = mlp(features)

        # Flatten the per-token features and project to the final embedding.
        features = self.feature(features.view(features.shape[0], -1))
        return features  # features.view(features.shape[0], -1)


class NetLoader:
    """Builds the feature network, loads the trained checkpoint, and moves everything to DEVICE."""

    def __init__(self):
        token_num, hidden_size = 100, 768
        Config = namedtuple(
            "Config", ["in_dim", "hid_dim", "dropout", "out_dim", "token_num"]
        )
        config = Config(
            in_dim=hidden_size,
            token_num=token_num,
            hid_dim=512,
            dropout=0.2,
            out_dim=300,
        )
        self.config = config
        self.net = mmdPreModel(
            config=config, num_mlp=0, transformer_flag=True, num_hidden_layers=1
        )
        checkpoint_filename = "./net.pt"
        checkpoint = torch.load(checkpoint_filename, map_location=DEVICE)
        self.net.load_state_dict(checkpoint["net"])
        # Kernel parameters (sigma, sigma0_u, ep) stored alongside the network weights.
        self.sigma, self.sigma0_u, self.ep = (
            checkpoint["sigma"],
            checkpoint["sigma0_u"],
            checkpoint["ep"],
        )
        self.net = self.net.to(DEVICE)
        self.sigma, self.sigma0_u, self.ep = (
            self.sigma.to(DEVICE),
            self.sigma0_u.to(DEVICE),
            self.ep.to(DEVICE),
        )


net = NetLoader()
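
For reference, a minimal usage sketch of the module above (not part of the commit): it assumes `./net.pt` is present and that `utils.DEVICE` names a valid torch device, and it pushes random stand-in features through the loaded network.

# Minimal sketch (assumes ./net.pt exists and utils.DEVICE is a valid torch device).
import torch
from utils import DEVICE
from meta_train import net  # module-level NetLoader instance built at import time

# Random stand-in for token-level features: (batch, token_num, in_dim) = (8, 100, 768).
features = torch.randn(8, net.config.token_num, net.config.in_dim, device=DEVICE)
with torch.no_grad():
    embeddings = net.net(features)
print(embeddings.shape)  # expected: torch.Size([8, 300]), i.e. (batch, config.out_dim)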