Update modeling_autoencoder.py
modeling_autoencoder.py  +6 -3  CHANGED
@@ -206,10 +206,13 @@ def create_layers(
     elif layer_types in ['lstm', 'rnn', 'gru']:
         rnn_layer = getattr(nn, layer_types.upper())  # Dynamically get the RNN layer class.
         half_output_dim = output_dim // (2 if bidirectional else 1)
-        if idx == 0:
+        if model_section == "decoder":
+            if idx == 0:
+                layers.append(rnn_layer(input_dim, half_output_dim, batch_first=True, bidirectional=bidirectional))
+            else:
+                layers.append(rnn_layer(input_dim*2, half_output_dim, batch_first=True, bidirectional=bidirectional))
+        else:
             layers.append(rnn_layer(input_dim, half_output_dim, batch_first=True, bidirectional=bidirectional))
-        else:
-            layers.append(rnn_layer(input_dim*2, half_output_dim, batch_first=True, bidirectional=bidirectional))
         # Add dropout layer between layers, except for the last layer.
         if (idx != num_layers - 1) and (dropout_rate is not None):
             layers.append(nn.Dropout(dropout_rate))
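A note on the shape arithmetic behind this change: when bidirectional=True, a PyTorch RNN emits 2 * hidden_size features per timestep, so every stacked layer after the first one sees twice half_output_dim, which the diff spells as input_dim*2; the change scopes that doubling to the decoder, while the other section keeps input_dim for every layer. Below is a minimal sketch of that arithmetic; the sizes and the use of nn.LSTM are illustrative assumptions, not taken from create_layers itself.

import torch
import torch.nn as nn

# Illustrative sizes only (assumptions, not from the diff).
input_dim = 32
output_dim = 64
bidirectional = True
half_output_dim = output_dim // (2 if bidirectional else 1)  # 32

# First stacked layer consumes the raw input width.
first = nn.LSTM(input_dim, half_output_dim, batch_first=True, bidirectional=bidirectional)
x = torch.randn(8, 16, input_dim)            # (batch, seq_len, features)
out, _ = first(x)
assert out.shape[-1] == 2 * half_output_dim  # bidirectional doubles the feature dim

# Any later layer must therefore accept the doubled width; this is the
# case the diff's input_dim*2 branch accounts for in the decoder.
second = nn.LSTM(2 * half_output_dim, half_output_dim, batch_first=True, bidirectional=bidirectional)
out2, _ = second(out)
assert out2.shape[-1] == 2 * half_output_dim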