Fraser-Greenlee committed
Commit 19dc6e3 · Parent: 3bdb76c

rm old code

dreamcoder/deprecated/__init__.py DELETED
File without changes
dreamcoder/deprecated/network.py DELETED
@@ -1,479 +0,0 @@
-"""
-Deprecated network.py module. This file only exists to support backwards-compatibility
-with old pickle files. See lib/__init__.py for more information.
-"""
-
-from __future__ import print_function
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from torch.autograd import Variable
-from torch.nn.parameter import Parameter
-
-
-# UPGRADING TO INPUT -> OUTPUT -> TARGET
-# Todo:
-# [X] Output attending to input
-# [X] Target attending to output
-# [ ] check passing hidden state between encoders/decoder (+ pass c?)
-# [ ] add v_output
-
-
-def choose(matrix, idxs):
-    if isinstance(idxs, Variable):
-        idxs = idxs.data
-    assert(matrix.ndimension() == 2)
-    unrolled_idxs = idxs + \
-        torch.arange(0, matrix.size(0)).type_as(idxs) * matrix.size(1)
-    return matrix.view(matrix.nelement())[unrolled_idxs]
-
-
-class Network(nn.Module):
-    """
-    Todo:
-    - Beam search
-    - check if this is right? attend during P->FC rather than during softmax->P?
-    - allow length 0 inputs/targets
-    - give n_examples as input to FC
-    - Initialise new weights randomly, rather than as zeroes
-    """
-
-    def __init__(
-            self,
-            input_vocabulary,
-            target_vocabulary,
-            hidden_size=512,
-            embedding_size=128,
-            cell_type="LSTM"):
-        """
-        :param list input_vocabulary: list of possible inputs
-        :param list target_vocabulary: list of possible targets
-        """
-        super(Network, self).__init__()
-        self.h_input_encoder_size = hidden_size
-        self.h_output_encoder_size = hidden_size
-        self.h_decoder_size = hidden_size
-        self.embedding_size = embedding_size
-        self.input_vocabulary = input_vocabulary
-        self.target_vocabulary = target_vocabulary
-        # Number of tokens in input vocabulary
-        self.v_input = len(input_vocabulary)
-        # Number of tokens in target vocabulary
-        self.v_target = len(target_vocabulary)
-
-        self.cell_type = cell_type
-        if cell_type == 'GRU':
-            self.input_encoder_cell = nn.GRUCell(
-                input_size=self.v_input + 1,
-                hidden_size=self.h_input_encoder_size,
-                bias=True)
-            self.input_encoder_init = Parameter(
-                torch.rand(1, self.h_input_encoder_size))
-            self.output_encoder_cell = nn.GRUCell(
-                input_size=self.v_input +
-                1 +
-                self.h_input_encoder_size,
-                hidden_size=self.h_output_encoder_size,
-                bias=True)
-            self.decoder_cell = nn.GRUCell(
-                input_size=self.v_target + 1,
-                hidden_size=self.h_decoder_size,
-                bias=True)
-        if cell_type == 'LSTM':
-            self.input_encoder_cell = nn.LSTMCell(
-                input_size=self.v_input + 1,
-                hidden_size=self.h_input_encoder_size,
-                bias=True)
-            self.input_encoder_init = nn.ParameterList([Parameter(torch.rand(
-                1, self.h_input_encoder_size)), Parameter(torch.rand(1, self.h_input_encoder_size))])
-            self.output_encoder_cell = nn.LSTMCell(
-                input_size=self.v_input +
-                1 +
-                self.h_input_encoder_size,
-                hidden_size=self.h_output_encoder_size,
-                bias=True)
-            self.output_encoder_init_c = Parameter(
-                torch.rand(1, self.h_output_encoder_size))
-            self.decoder_cell = nn.LSTMCell(
-                input_size=self.v_target + 1,
-                hidden_size=self.h_decoder_size,
-                bias=True)
-            self.decoder_init_c = Parameter(torch.rand(1, self.h_decoder_size))
-
-        self.W = nn.Linear(
-            self.h_output_encoder_size +
-            self.h_decoder_size,
-            self.embedding_size)
-        self.V = nn.Linear(self.embedding_size, self.v_target + 1)
-        self.input_A = nn.Bilinear(
-            self.h_input_encoder_size,
-            self.h_output_encoder_size,
-            1,
-            bias=False)
-        self.output_A = nn.Bilinear(
-            self.h_output_encoder_size,
-            self.h_decoder_size,
-            1,
-            bias=False)
-        self.input_EOS = torch.zeros(1, self.v_input + 1)
-        self.input_EOS[:, -1] = 1
-        self.input_EOS = Parameter(self.input_EOS)
-        self.output_EOS = torch.zeros(1, self.v_input + 1)
-        self.output_EOS[:, -1] = 1
-        self.output_EOS = Parameter(self.output_EOS)
-        self.target_EOS = torch.zeros(1, self.v_target + 1)
-        self.target_EOS[:, -1] = 1
-        self.target_EOS = Parameter(self.target_EOS)
-
-    def __getstate__(self):
-        if hasattr(self, 'opt'):
-            return dict([(k, v) for k, v in self.__dict__.items(
-            ) if k is not 'opt'] + [('optstate', self.opt.state_dict())])
-            # return {**{k:v for k,v in self.__dict__.items() if k is not 'opt'},
-            # 'optstate': self.opt.state_dict()}
-        else:
-            return self.__dict__
-
-    def __setstate__(self, state):
-        self.__dict__.update(state)
-        # Legacy:
-        if isinstance(self.input_encoder_init, tuple):
-            self.input_encoder_init = nn.ParameterList(
-                list(self.input_encoder_init))
-
-    def clear_optimiser(self):
-        if hasattr(self, 'opt'):
-            del self.opt
-        if hasattr(self, 'optstate'):
-            del self.optstate
-
-    def get_optimiser(self):
-        self.opt = torch.optim.Adam(self.parameters(), lr=0.001)
-        if hasattr(self, 'optstate'):
-            self.opt.load_state_dict(self.optstate)
-
-    def optimiser_step(self, inputs, outputs, target):
-        if not hasattr(self, 'opt'):
-            self.get_optimiser()
-        score = self.score(inputs, outputs, target, autograd=True).mean()
-        (-score).backward()
-        self.opt.step()
-        self.opt.zero_grad()
-        return score.data[0]
-
-    def set_target_vocabulary(self, target_vocabulary):
-        if target_vocabulary == self.target_vocabulary:
-            return
-
-        V_weight = []
-        V_bias = []
-        decoder_ih = []
-
-        for i in range(len(target_vocabulary)):
-            if target_vocabulary[i] in self.target_vocabulary:
-                j = self.target_vocabulary.index(target_vocabulary[i])
-                V_weight.append(self.V.weight.data[j:j + 1])
-                V_bias.append(self.V.bias.data[j:j + 1])
-                decoder_ih.append(self.decoder_cell.weight_ih.data[:, j:j + 1])
-            else:
-                V_weight.append(torch.zeros(1, self.V.weight.size(1)))
-                V_bias.append(torch.ones(1) * -10)
-                decoder_ih.append(
-                    torch.zeros(
-                        self.decoder_cell.weight_ih.data.size(0), 1))
-
-        V_weight.append(self.V.weight.data[-1:])
-        V_bias.append(self.V.bias.data[-1:])
-        decoder_ih.append(self.decoder_cell.weight_ih.data[:, -1:])
-
-        self.target_vocabulary = target_vocabulary
-        self.v_target = len(target_vocabulary)
-        self.target_EOS.data = torch.zeros(1, self.v_target + 1)
-        self.target_EOS.data[:, -1] = 1
-
-        self.V.weight.data = torch.cat(V_weight, dim=0)
-        self.V.bias.data = torch.cat(V_bias, dim=0)
-        self.V.out_features = self.V.bias.data.size(0)
-
-        self.decoder_cell.weight_ih.data = torch.cat(decoder_ih, dim=1)
-        self.decoder_cell.input_size = self.decoder_cell.weight_ih.data.size(1)
-
-        self.clear_optimiser()
-
-    def input_encoder_get_init(self, batch_size):
-        if self.cell_type == "GRU":
-            return self.input_encoder_init.repeat(batch_size, 1)
-        if self.cell_type == "LSTM":
-            return tuple(x.repeat(batch_size, 1)
-                         for x in self.input_encoder_init)
-
-    def output_encoder_get_init(self, input_encoder_h):
-        if self.cell_type == "GRU":
-            return input_encoder_h
-        if self.cell_type == "LSTM":
-            return (
-                input_encoder_h,
-                self.output_encoder_init_c.repeat(
-                    input_encoder_h.size(0),
-                    1))
-
-    def decoder_get_init(self, output_encoder_h):
-        if self.cell_type == "GRU":
-            return output_encoder_h
-        if self.cell_type == "LSTM":
-            return (
-                output_encoder_h,
-                self.decoder_init_c.repeat(
-                    output_encoder_h.size(0),
-                    1))
-
-    def cell_get_h(self, cell_state):
-        if self.cell_type == "GRU":
-            return cell_state
-        if self.cell_type == "LSTM":
-            return cell_state[0]
-
-    def score(self, inputs, outputs, target, autograd=False):
-        inputs = self.inputsToTensors(inputs)
-        outputs = self.inputsToTensors(outputs)
-        target = self.targetToTensor(target)
-        target, score = self.run(inputs, outputs, target=target, mode="score")
-        # target = self.tensorToOutput(target)
-        if autograd:
-            return score
-        else:
-            return score.data
-
-    def sample(self, inputs, outputs):
-        inputs = self.inputsToTensors(inputs)
-        outputs = self.inputsToTensors(outputs)
-        target, score = self.run(inputs, outputs, mode="sample")
-        target = self.tensorToOutput(target)
-        return target
-
-    def sampleAndScore(self, inputs, outputs, nRepeats=None):
-        inputs = self.inputsToTensors(inputs)
-        outputs = self.inputsToTensors(outputs)
-        if nRepeats is None:
-            target, score = self.run(inputs, outputs, mode="sample")
-            target = self.tensorToOutput(target)
-            return target, score.data
-        else:
-            target = []
-            score = []
-            for i in range(nRepeats):
-                # print("repeat %d" % i)
-                t, s = self.run(inputs, outputs, mode="sample")
-                t = self.tensorToOutput(t)
-                target.extend(t)
-                score.extend(list(s.data))
-            return target, score
-
-    def run(self, inputs, outputs, target=None, mode="sample"):
-        """
-        :param mode: "score" returns log p(target|input), "sample" returns target ~ p(-|input)
-        :param List[LongTensor] inputs: n_examples * (max_length_input * batch_size)
-        :param List[LongTensor] target: max_length_target * batch_size
-        """
-        assert((mode == "score" and target is not None) or mode == "sample")
-
-        n_examples = len(inputs)
-        max_length_input = [inputs[j].size(0) for j in range(n_examples)]
-        max_length_output = [outputs[j].size(0) for j in range(n_examples)]
-        max_length_target = target.size(0) if target is not None else 10
-        batch_size = inputs[0].size(1)
-
-        score = Variable(torch.zeros(batch_size))
-        inputs_scatter = [Variable(torch.zeros(max_length_input[j], batch_size, self.v_input + 1).scatter_(
-            2, inputs[j][:, :, None], 1)) for j in range(n_examples)]  # n_examples * (max_length_input * batch_size * v_input+1)
-        outputs_scatter = [Variable(torch.zeros(max_length_output[j], batch_size, self.v_input + 1).scatter_(
-            2, outputs[j][:, :, None], 1)) for j in range(n_examples)]  # n_examples * (max_length_output * batch_size * v_input+1)
-        if target is not None:
-            target_scatter = Variable(torch.zeros(max_length_target,
-                                                  batch_size,
-                                                  self.v_target + 1).scatter_(2,
-                                                                              target[:,
-                                                                                     :,
-                                                                                     None],
-                                                                              1))  # max_length_target * batch_size * v_target+1
-
-        # -------------- Input Encoder -------------
-
-        # n_examples * (max_length_input * batch_size * h_encoder_size)
-        input_H = []
-        input_embeddings = []  # h for example at INPUT_EOS
-        # 0 until (and including) INPUT_EOS, then -inf
-        input_attention_mask = []
-        for j in range(n_examples):
-            active = torch.Tensor(max_length_input[j], batch_size).byte()
-            active[0, :] = 1
-            state = self.input_encoder_get_init(batch_size)
-            hs = []
-            for i in range(max_length_input[j]):
-                state = self.input_encoder_cell(
-                    inputs_scatter[j][i, :, :], state)
-                if i + 1 < max_length_input[j]:
-                    active[i + 1, :] = active[i, :] * \
-                        (inputs[j][i, :] != self.v_input)
-                h = self.cell_get_h(state)
-                hs.append(h[None, :, :])
-            input_H.append(torch.cat(hs, 0))
-            embedding_idx = active.sum(0).long() - 1
-            embedding = input_H[j].gather(0, Variable(
-                embedding_idx[None, :, None].repeat(1, 1, self.h_input_encoder_size)))[0]
-            input_embeddings.append(embedding)
-            input_attention_mask.append(Variable(active.float().log()))
-
-        # -------------- Output Encoder -------------
-
-        def input_attend(j, h_out):
-            """
-            'general' attention from https://arxiv.org/pdf/1508.04025.pdf
-            :param j: Index of example
-            :param h_out: batch_size * h_output_encoder_size
-            """
-            scores = self.input_A(
-                input_H[j].view(
-                    max_length_input[j] * batch_size,
-                    self.h_input_encoder_size),
-                h_out.view(
-                    batch_size,
-                    self.h_output_encoder_size).repeat(
-                    max_length_input[j],
-                    1)).view(
-                max_length_input[j],
-                batch_size) + input_attention_mask[j]
-            c = (F.softmax(scores[:, :, None], dim=0) * input_H[j]).sum(0)
-            return c
-
-        # n_examples * (max_length_input * batch_size * h_encoder_size)
-        output_H = []
-        output_embeddings = []  # h for example at INPUT_EOS
-        # 0 until (and including) INPUT_EOS, then -inf
-        output_attention_mask = []
-        for j in range(n_examples):
-            active = torch.Tensor(max_length_output[j], batch_size).byte()
-            active[0, :] = 1
-            state = self.output_encoder_get_init(input_embeddings[j])
-            hs = []
-            h = self.cell_get_h(state)
-            for i in range(max_length_output[j]):
-                state = self.output_encoder_cell(torch.cat(
-                    [outputs_scatter[j][i, :, :], input_attend(j, h)], 1), state)
-                if i + 1 < max_length_output[j]:
-                    active[i + 1, :] = active[i, :] * \
-                        (outputs[j][i, :] != self.v_input)
-                h = self.cell_get_h(state)
-                hs.append(h[None, :, :])
-            output_H.append(torch.cat(hs, 0))
-            embedding_idx = active.sum(0).long() - 1
-            embedding = output_H[j].gather(0, Variable(
-                embedding_idx[None, :, None].repeat(1, 1, self.h_output_encoder_size)))[0]
-            output_embeddings.append(embedding)
-            output_attention_mask.append(Variable(active.float().log()))
-
-        # ------------------ Decoder -----------------
-
-        def output_attend(j, h_dec):
-            """
-            'general' attention from https://arxiv.org/pdf/1508.04025.pdf
-            :param j: Index of example
-            :param h_dec: batch_size * h_decoder_size
-            """
-            scores = self.output_A(
-                output_H[j].view(
-                    max_length_output[j] * batch_size,
-                    self.h_output_encoder_size),
-                h_dec.view(
-                    batch_size,
-                    self.h_decoder_size).repeat(
-                    max_length_output[j],
-                    1)).view(
-                max_length_output[j],
-                batch_size) + output_attention_mask[j]
-            c = (F.softmax(scores[:, :, None], dim=0) * output_H[j]).sum(0)
-            return c
-
-        # Multi-example pooling: Figure 3, https://arxiv.org/pdf/1703.07469.pdf
-        target = target if mode == "score" else torch.zeros(
-            max_length_target, batch_size).long()
-        decoder_states = [
-            self.decoder_get_init(
-                output_embeddings[j]) for j in range(n_examples)]  # P
-        active = torch.ones(batch_size).byte()
-        for i in range(max_length_target):
-            FC = []
-            for j in range(n_examples):
-                h = self.cell_get_h(decoder_states[j])
-                p_aug = torch.cat([h, output_attend(j, h)], 1)
-                FC.append(F.tanh(self.W(p_aug)[None, :, :]))
-            # batch_size * embedding_size
-            m = torch.max(torch.cat(FC, 0), 0)[0]
-            logsoftmax = F.log_softmax(self.V(m), dim=1)
-            if mode == "sample":
-                target[i, :] = torch.multinomial(
-                    logsoftmax.data.exp(), 1)[:, 0]
-            score = score + \
-                choose(logsoftmax, target[i, :]) * Variable(active.float())
-            active *= (target[i, :] != self.v_target)
-            for j in range(n_examples):
-                if mode == "score":
-                    target_char_scatter = target_scatter[i, :, :]
-                elif mode == "sample":
-                    target_char_scatter = Variable(torch.zeros(
-                        batch_size, self.v_target + 1).scatter_(1, target[i, :, None], 1))
-                decoder_states[j] = self.decoder_cell(
-                    target_char_scatter, decoder_states[j])
-        return target, score
-
-    def inputsToTensors(self, inputss):
-        """
-        :param inputss: size = nBatch * nExamples
-        """
-        tensors = []
-        for j in range(len(inputss[0])):
-            inputs = [x[j] for x in inputss]
-            maxlen = max(len(s) for s in inputs)
-            t = torch.ones(
-                1 if maxlen == 0 else maxlen + 1,
-                len(inputs)).long() * self.v_input
-            for i in range(len(inputs)):
-                s = inputs[i]
-                if len(s) > 0:
-                    t[:len(s), i] = torch.LongTensor(
-                        [self.input_vocabulary.index(x) for x in s])
-            tensors.append(t)
-        return tensors
-
-    def targetToTensor(self, targets):
-        """
-        :param targets:
-        """
-        maxlen = max(len(s) for s in targets)
-        t = torch.ones(
-            1 if maxlen == 0 else maxlen + 1,
-            len(targets)).long() * self.v_target
-        for i in range(len(targets)):
-            s = targets[i]
-            if len(s) > 0:
-                t[:len(s), i] = torch.LongTensor(
-                    [self.target_vocabulary.index(x) for x in s])
-        return t
-
-    def tensorToOutput(self, tensor):
-        """
-        :param tensor: max_length * batch_size
-        """
-        out = []
-        for i in range(tensor.size(1)):
-            l = tensor[:, i].tolist()
-            if l[0] == self.v_target:
-                out.append([])
-            elif self.v_target in l:
-                final = tensor[:, i].tolist().index(self.v_target)
-                out.append([self.target_vocabulary[x]
-                            for x in tensor[:final, i]])
-            else:
-                out.append([self.target_vocabulary[x] for x in tensor[:, i]])
-        return out
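
For context on the deletion: the module docstring above notes that this file survived only so that old pickle files could still resolve classes under their legacy module path. A minimal, self-contained sketch of that general pickle-compatibility pattern (illustrative only; the module and class names below are not taken from this repository) looks like:

```python
import pickle
import sys
import types

# Stand-in for a class that legacy pickles reference by its old module path.
class Network:
    def __init__(self, name):
        self.name = name

# Simulate the old module path a legacy pickle would have recorded.
legacy_module = types.ModuleType('deprecated_network')
legacy_module.Network = Network
Network.__module__ = 'deprecated_network'
sys.modules['deprecated_network'] = legacy_module

blob = pickle.dumps(Network('demo'))  # stores the reference 'deprecated_network.Network'

# After the old module is removed, an alias keeps those pickles loadable:
# pickle.loads() finds 'deprecated_network' in sys.modules and resolves
# Network from the current module instead.
del sys.modules['deprecated_network']
sys.modules['deprecated_network'] = sys.modules[__name__]
print(pickle.loads(blob).name)  # -> demo
```

Removing such a shim, as this commit does, is safe once no pickles that still reference the old `dreamcoder.deprecated.network` path need to be loaded.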